kernel: Update to 3.1.5.
[people/arne_f/ipfire-3.x.git] kernel/patches/grsecurity-2.2.2-3.1.5-201112101853.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index dfa6fc6..0095943 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9+*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13@@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17+*.gmo
18 *.grep
19 *.grp
20 *.gz
21@@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25+*.vim
26 *.xml
27 *.xz
28 *_MODULES
29+*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33@@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37+PERF*
38 SCCS
39 System.map*
40 TAGS
41@@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45+builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51+clut_vga16.c
52+common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59+config.c
60 config.mak
61 config.mak.autogen
62+config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66@@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70+exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78+gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90@@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103-linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107@@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111-media
112 mconf
113+mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120+mkpiggy
121 mkprep
122 mkregtable
123 mktables
124@@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128+regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132@@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152+vmlinux.bin.bz2
153 vmlinux.lds
154+vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158@@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zconf.lex.c
169 zoffset.h
170diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171index d6e6724..a024ce8 100644
172--- a/Documentation/kernel-parameters.txt
173+++ b/Documentation/kernel-parameters.txt
174@@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179+ virtualization environments that don't cope well with the
180+ expand down segment used by UDEREF on X86-32 or the frequent
181+ page table updates on X86-64.
182+
183+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184+
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188diff --git a/Makefile b/Makefile
189index 94ab2ad..1e4a6e8 100644
190--- a/Makefile
191+++ b/Makefile
192@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197-HOSTCXXFLAGS = -O2
198+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208-PHONY += scripts_basic
209-scripts_basic:
210+PHONY += scripts_basic gcc-plugins
211+scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215@@ -564,6 +565,42 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219+ifndef DISABLE_PAX_PLUGINS
220+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223+endif
224+ifdef CONFIG_PAX_MEMORY_STACKLEAK
225+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227+endif
228+ifdef CONFIG_KALLOCSTAT_PLUGIN
229+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230+endif
231+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234+endif
235+ifdef CONFIG_CHECKER_PLUGIN
236+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238+endif
239+endif
240+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242+gcc-plugins:
243+ $(Q)$(MAKE) $(build)=tools/gcc
244+else
245+gcc-plugins:
246+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
247+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
248+else
249+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
250+endif
251+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
252+endif
253+endif
254+
255 include $(srctree)/arch/$(SRCARCH)/Makefile
256
257 ifneq ($(CONFIG_FRAME_WARN),0)
258@@ -708,7 +745,7 @@ export mod_strip_cmd
259
260
261 ifeq ($(KBUILD_EXTMOD),)
262-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
263+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
264
265 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
266 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
267@@ -932,6 +969,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
268
269 # The actual objects are generated when descending,
270 # make sure no implicit rule kicks in
271+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
272 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
273
274 # Handle descending into subdirectories listed in $(vmlinux-dirs)
275@@ -941,7 +979,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
276 # Error messages still appears in the original language
277
278 PHONY += $(vmlinux-dirs)
279-$(vmlinux-dirs): prepare scripts
280+$(vmlinux-dirs): gcc-plugins prepare scripts
281 $(Q)$(MAKE) $(build)=$@
282
283 # Store (new) KERNELRELASE string in include/config/kernel.release
284@@ -986,6 +1024,7 @@ prepare0: archprepare FORCE
285 $(Q)$(MAKE) $(build)=. missing-syscalls
286
287 # All the preparing..
288+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
289 prepare: prepare0
290
291 # Generate some files
292@@ -1087,6 +1126,7 @@ all: modules
293 # using awk while concatenating to the final file.
294
295 PHONY += modules
296+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
297 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
298 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
299 @$(kecho) ' Building modules, stage 2.';
300@@ -1102,7 +1142,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
301
302 # Target to prepare building external modules
303 PHONY += modules_prepare
304-modules_prepare: prepare scripts
305+modules_prepare: gcc-plugins prepare scripts
306
307 # Target to install modules
308 PHONY += modules_install
309@@ -1198,7 +1238,7 @@ distclean: mrproper
310 @find $(srctree) $(RCS_FIND_IGNORE) \
311 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
312 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
313- -o -name '.*.rej' -o -size 0 \
314+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
315 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
316 -type f -print | xargs rm -f
317
318@@ -1360,6 +1400,7 @@ PHONY += $(module-dirs) modules
319 $(module-dirs): crmodverdir $(objtree)/Module.symvers
320 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
321
322+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
323 modules: $(module-dirs)
324 @$(kecho) ' Building modules, stage 2.';
325 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
326@@ -1486,17 +1527,19 @@ else
327 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
328 endif
329
330-%.s: %.c prepare scripts FORCE
331+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
332+%.s: %.c gcc-plugins prepare scripts FORCE
333 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
334 %.i: %.c prepare scripts FORCE
335 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
336-%.o: %.c prepare scripts FORCE
337+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
338+%.o: %.c gcc-plugins prepare scripts FORCE
339 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
340 %.lst: %.c prepare scripts FORCE
341 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
342-%.s: %.S prepare scripts FORCE
343+%.s: %.S gcc-plugins prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345-%.o: %.S prepare scripts FORCE
346+%.o: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348 %.symtypes: %.c prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350@@ -1506,11 +1549,13 @@ endif
351 $(cmd_crmodverdir)
352 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
353 $(build)=$(build-dir)
354-%/: prepare scripts FORCE
355+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
356+%/: gcc-plugins prepare scripts FORCE
357 $(cmd_crmodverdir)
358 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
359 $(build)=$(build-dir)
360-%.ko: prepare scripts FORCE
361+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
362+%.ko: gcc-plugins prepare scripts FORCE
363 $(cmd_crmodverdir)
364 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
365 $(build)=$(build-dir) $(@:.ko=.o)
366diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
367index da5449e..7418343 100644
368--- a/arch/alpha/include/asm/elf.h
369+++ b/arch/alpha/include/asm/elf.h
370@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
371
372 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
373
374+#ifdef CONFIG_PAX_ASLR
375+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
376+
377+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
378+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
379+#endif
380+
381 /* $0 is set by ld.so to a pointer to a function which might be
382 registered using atexit. This provides a mean for the dynamic
383 linker to call DT_FINI functions for shared libraries that have
384diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
385index de98a73..bd4f1f8 100644
386--- a/arch/alpha/include/asm/pgtable.h
387+++ b/arch/alpha/include/asm/pgtable.h
388@@ -101,6 +101,17 @@ struct vm_area_struct;
389 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
390 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
391 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
392+
393+#ifdef CONFIG_PAX_PAGEEXEC
394+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
395+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
396+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
397+#else
398+# define PAGE_SHARED_NOEXEC PAGE_SHARED
399+# define PAGE_COPY_NOEXEC PAGE_COPY
400+# define PAGE_READONLY_NOEXEC PAGE_READONLY
401+#endif
402+
403 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
404
405 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
406diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
407index 2fd00b7..cfd5069 100644
408--- a/arch/alpha/kernel/module.c
409+++ b/arch/alpha/kernel/module.c
410@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
411
412 /* The small sections were sorted to the end of the segment.
413 The following should definitely cover them. */
414- gp = (u64)me->module_core + me->core_size - 0x8000;
415+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
416 got = sechdrs[me->arch.gotsecindex].sh_addr;
417
418 for (i = 0; i < n; i++) {
419diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
420index 01e8715..be0e80f 100644
421--- a/arch/alpha/kernel/osf_sys.c
422+++ b/arch/alpha/kernel/osf_sys.c
423@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
424 /* At this point: (!vma || addr < vma->vm_end). */
425 if (limit - len < addr)
426 return -ENOMEM;
427- if (!vma || addr + len <= vma->vm_start)
428+ if (check_heap_stack_gap(vma, addr, len))
429 return addr;
430 addr = vma->vm_end;
431 vma = vma->vm_next;
432@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
433 merely specific addresses, but regions of memory -- perhaps
434 this feature should be incorporated into all ports? */
435
436+#ifdef CONFIG_PAX_RANDMMAP
437+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
438+#endif
439+
440 if (addr) {
441 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
442 if (addr != (unsigned long) -ENOMEM)
443@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
444 }
445
446 /* Next, try allocating at TASK_UNMAPPED_BASE. */
447- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
448- len, limit);
449+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
450+
451 if (addr != (unsigned long) -ENOMEM)
452 return addr;
453
454diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
455index fadd5f8..904e73a 100644
456--- a/arch/alpha/mm/fault.c
457+++ b/arch/alpha/mm/fault.c
458@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
459 __reload_thread(pcb);
460 }
461
462+#ifdef CONFIG_PAX_PAGEEXEC
463+/*
464+ * PaX: decide what to do with offenders (regs->pc = fault address)
465+ *
466+ * returns 1 when task should be killed
467+ * 2 when patched PLT trampoline was detected
468+ * 3 when unpatched PLT trampoline was detected
469+ */
470+static int pax_handle_fetch_fault(struct pt_regs *regs)
471+{
472+
473+#ifdef CONFIG_PAX_EMUPLT
474+ int err;
475+
476+ do { /* PaX: patched PLT emulation #1 */
477+ unsigned int ldah, ldq, jmp;
478+
479+ err = get_user(ldah, (unsigned int *)regs->pc);
480+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
481+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
482+
483+ if (err)
484+ break;
485+
486+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
487+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
488+ jmp == 0x6BFB0000U)
489+ {
490+ unsigned long r27, addr;
491+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
492+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
493+
494+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
495+ err = get_user(r27, (unsigned long *)addr);
496+ if (err)
497+ break;
498+
499+ regs->r27 = r27;
500+ regs->pc = r27;
501+ return 2;
502+ }
503+ } while (0);
504+
505+ do { /* PaX: patched PLT emulation #2 */
506+ unsigned int ldah, lda, br;
507+
508+ err = get_user(ldah, (unsigned int *)regs->pc);
509+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
510+ err |= get_user(br, (unsigned int *)(regs->pc+8));
511+
512+ if (err)
513+ break;
514+
515+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
516+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
517+ (br & 0xFFE00000U) == 0xC3E00000U)
518+ {
519+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
520+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
521+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
522+
523+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
524+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
525+ return 2;
526+ }
527+ } while (0);
528+
529+ do { /* PaX: unpatched PLT emulation */
530+ unsigned int br;
531+
532+ err = get_user(br, (unsigned int *)regs->pc);
533+
534+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
535+ unsigned int br2, ldq, nop, jmp;
536+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
537+
538+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
539+ err = get_user(br2, (unsigned int *)addr);
540+ err |= get_user(ldq, (unsigned int *)(addr+4));
541+ err |= get_user(nop, (unsigned int *)(addr+8));
542+ err |= get_user(jmp, (unsigned int *)(addr+12));
543+ err |= get_user(resolver, (unsigned long *)(addr+16));
544+
545+ if (err)
546+ break;
547+
548+ if (br2 == 0xC3600000U &&
549+ ldq == 0xA77B000CU &&
550+ nop == 0x47FF041FU &&
551+ jmp == 0x6B7B0000U)
552+ {
553+ regs->r28 = regs->pc+4;
554+ regs->r27 = addr+16;
555+ regs->pc = resolver;
556+ return 3;
557+ }
558+ }
559+ } while (0);
560+#endif
561+
562+ return 1;
563+}
564+
565+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
566+{
567+ unsigned long i;
568+
569+ printk(KERN_ERR "PAX: bytes at PC: ");
570+ for (i = 0; i < 5; i++) {
571+ unsigned int c;
572+ if (get_user(c, (unsigned int *)pc+i))
573+ printk(KERN_CONT "???????? ");
574+ else
575+ printk(KERN_CONT "%08x ", c);
576+ }
577+ printk("\n");
578+}
579+#endif
580
581 /*
582 * This routine handles page faults. It determines the address,
583@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
584 good_area:
585 si_code = SEGV_ACCERR;
586 if (cause < 0) {
587- if (!(vma->vm_flags & VM_EXEC))
588+ if (!(vma->vm_flags & VM_EXEC)) {
589+
590+#ifdef CONFIG_PAX_PAGEEXEC
591+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
592+ goto bad_area;
593+
594+ up_read(&mm->mmap_sem);
595+ switch (pax_handle_fetch_fault(regs)) {
596+
597+#ifdef CONFIG_PAX_EMUPLT
598+ case 2:
599+ case 3:
600+ return;
601+#endif
602+
603+ }
604+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
605+ do_group_exit(SIGKILL);
606+#else
607 goto bad_area;
608+#endif
609+
610+ }
611 } else if (!cause) {
612 /* Allow reads even for write-only mappings */
613 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
614diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
615index 86976d0..8a57797 100644
616--- a/arch/arm/include/asm/atomic.h
617+++ b/arch/arm/include/asm/atomic.h
618@@ -239,6 +239,14 @@ typedef struct {
619 u64 __aligned(8) counter;
620 } atomic64_t;
621
622+#ifdef CONFIG_PAX_REFCOUNT
623+typedef struct {
624+ u64 __aligned(8) counter;
625+} atomic64_unchecked_t;
626+#else
627+typedef atomic64_t atomic64_unchecked_t;
628+#endif
629+
630 #define ATOMIC64_INIT(i) { (i) }
631
632 static inline u64 atomic64_read(atomic64_t *v)
633diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
634index 0e9ce8d..6ef1e03 100644
635--- a/arch/arm/include/asm/elf.h
636+++ b/arch/arm/include/asm/elf.h
637@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
638 the loader. We need to make sure that it is out of the way of the program
639 that it will "exec", and that there is sufficient room for the brk. */
640
641-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
642+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
643+
644+#ifdef CONFIG_PAX_ASLR
645+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
646+
647+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
648+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
649+#endif
650
651 /* When the program starts, a1 contains a pointer to a function to be
652 registered with atexit, as per the SVR4 ABI. A value of 0 means we
653@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
654 extern void elf_set_personality(const struct elf32_hdr *);
655 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
656
657-struct mm_struct;
658-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
659-#define arch_randomize_brk arch_randomize_brk
660-
661 extern int vectors_user_mapping(void);
662 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
663 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
664diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
665index e51b1e8..32a3113 100644
666--- a/arch/arm/include/asm/kmap_types.h
667+++ b/arch/arm/include/asm/kmap_types.h
668@@ -21,6 +21,7 @@ enum km_type {
669 KM_L1_CACHE,
670 KM_L2_CACHE,
671 KM_KDB,
672+ KM_CLEARPAGE,
673 KM_TYPE_NR
674 };
675
676diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
677index b293616..96310e5 100644
678--- a/arch/arm/include/asm/uaccess.h
679+++ b/arch/arm/include/asm/uaccess.h
680@@ -22,6 +22,8 @@
681 #define VERIFY_READ 0
682 #define VERIFY_WRITE 1
683
684+extern void check_object_size(const void *ptr, unsigned long n, bool to);
685+
686 /*
687 * The exception table consists of pairs of addresses: the first is the
688 * address of an instruction that is allowed to fault, and the second is
689@@ -387,8 +389,23 @@ do { \
690
691
692 #ifdef CONFIG_MMU
693-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
694-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
695+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
696+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
697+
698+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
699+{
700+ if (!__builtin_constant_p(n))
701+ check_object_size(to, n, false);
702+ return ___copy_from_user(to, from, n);
703+}
704+
705+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
706+{
707+ if (!__builtin_constant_p(n))
708+ check_object_size(from, n, true);
709+ return ___copy_to_user(to, from, n);
710+}
711+
712 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
713 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
714 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
715@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
716
717 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
718 {
719+ if ((long)n < 0)
720+ return n;
721+
722 if (access_ok(VERIFY_READ, from, n))
723 n = __copy_from_user(to, from, n);
724 else /* security hole - plug it */
725@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
726
727 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
728 {
729+ if ((long)n < 0)
730+ return n;
731+
732 if (access_ok(VERIFY_WRITE, to, n))
733 n = __copy_to_user(to, from, n);
734 return n;
735diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
736index aeef960..2966009 100644
737--- a/arch/arm/kernel/armksyms.c
738+++ b/arch/arm/kernel/armksyms.c
739@@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
740 #ifdef CONFIG_MMU
741 EXPORT_SYMBOL(copy_page);
742
743-EXPORT_SYMBOL(__copy_from_user);
744-EXPORT_SYMBOL(__copy_to_user);
745+EXPORT_SYMBOL(___copy_from_user);
746+EXPORT_SYMBOL(___copy_to_user);
747 EXPORT_SYMBOL(__clear_user);
748
749 EXPORT_SYMBOL(__get_user_1);
750diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
751index c9d11ea..5078081 100644
752--- a/arch/arm/kernel/process.c
753+++ b/arch/arm/kernel/process.c
754@@ -28,7 +28,6 @@
755 #include <linux/tick.h>
756 #include <linux/utsname.h>
757 #include <linux/uaccess.h>
758-#include <linux/random.h>
759 #include <linux/hw_breakpoint.h>
760 #include <linux/cpuidle.h>
761
762@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
763 return 0;
764 }
765
766-unsigned long arch_randomize_brk(struct mm_struct *mm)
767-{
768- unsigned long range_end = mm->brk + 0x02000000;
769- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
770-}
771-
772 #ifdef CONFIG_MMU
773 /*
774 * The vectors page is always readable from user space for the
fe2de317
MT
775diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
776index bc9f9da..c75d826 100644
777--- a/arch/arm/kernel/traps.c
778+++ b/arch/arm/kernel/traps.c
779@@ -257,6 +257,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
780
781 static DEFINE_SPINLOCK(die_lock);
782
783+extern void gr_handle_kernel_exploit(void);
784+
785 /*
786 * This function is protected against re-entrancy.
787 */
788@@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs *regs, int err)
789 panic("Fatal exception in interrupt");
790 if (panic_on_oops)
791 panic("Fatal exception");
792+
793+ gr_handle_kernel_exploit();
794+
795 if (ret != NOTIFY_STOP)
796 do_exit(SIGSEGV);
797 }
798diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
799index 66a477a..bee61d3 100644
800--- a/arch/arm/lib/copy_from_user.S
801+++ b/arch/arm/lib/copy_from_user.S
802@@ -16,7 +16,7 @@
803 /*
804 * Prototype:
805 *
806- * size_t __copy_from_user(void *to, const void *from, size_t n)
807+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
808 *
809 * Purpose:
810 *
811@@ -84,11 +84,11 @@
812
813 .text
814
815-ENTRY(__copy_from_user)
816+ENTRY(___copy_from_user)
817
818 #include "copy_template.S"
819
820-ENDPROC(__copy_from_user)
821+ENDPROC(___copy_from_user)
822
823 .pushsection .fixup,"ax"
824 .align 0
825diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
826index d066df6..df28194 100644
827--- a/arch/arm/lib/copy_to_user.S
828+++ b/arch/arm/lib/copy_to_user.S
829@@ -16,7 +16,7 @@
830 /*
831 * Prototype:
832 *
833- * size_t __copy_to_user(void *to, const void *from, size_t n)
834+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
835 *
836 * Purpose:
837 *
838@@ -88,11 +88,11 @@
839 .text
840
841 ENTRY(__copy_to_user_std)
842-WEAK(__copy_to_user)
843+WEAK(___copy_to_user)
844
845 #include "copy_template.S"
846
847-ENDPROC(__copy_to_user)
848+ENDPROC(___copy_to_user)
849 ENDPROC(__copy_to_user_std)
850
851 .pushsection .fixup,"ax"
852diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
853index d0ece2a..5ae2f39 100644
854--- a/arch/arm/lib/uaccess.S
855+++ b/arch/arm/lib/uaccess.S
856@@ -20,7 +20,7 @@
857
858 #define PAGE_SHIFT 12
859
860-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
861+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
862 * Purpose : copy a block to user memory from kernel memory
863 * Params : to - user memory
864 * : from - kernel memory
865@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
866 sub r2, r2, ip
867 b .Lc2u_dest_aligned
868
869-ENTRY(__copy_to_user)
870+ENTRY(___copy_to_user)
871 stmfd sp!, {r2, r4 - r7, lr}
872 cmp r2, #4
873 blt .Lc2u_not_enough
874@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
875 ldrgtb r3, [r1], #0
876 USER( T(strgtb) r3, [r0], #1) @ May fault
877 b .Lc2u_finished
878-ENDPROC(__copy_to_user)
879+ENDPROC(___copy_to_user)
880
881 .pushsection .fixup,"ax"
882 .align 0
883 9001: ldmfd sp!, {r0, r4 - r7, pc}
884 .popsection
885
886-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
887+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
888 * Purpose : copy a block from user memory to kernel memory
889 * Params : to - kernel memory
890 * : from - user memory
891@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
892 sub r2, r2, ip
893 b .Lcfu_dest_aligned
894
895-ENTRY(__copy_from_user)
896+ENTRY(___copy_from_user)
897 stmfd sp!, {r0, r2, r4 - r7, lr}
898 cmp r2, #4
899 blt .Lcfu_not_enough
900@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
901 USER( T(ldrgtb) r3, [r1], #1) @ May fault
902 strgtb r3, [r0], #1
903 b .Lcfu_finished
904-ENDPROC(__copy_from_user)
905+ENDPROC(___copy_from_user)
906
907 .pushsection .fixup,"ax"
908 .align 0
909diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
910index 8b9b136..70d5100 100644
911--- a/arch/arm/lib/uaccess_with_memcpy.c
912+++ b/arch/arm/lib/uaccess_with_memcpy.c
913@@ -103,7 +103,7 @@ out:
914 }
915
916 unsigned long
917-__copy_to_user(void __user *to, const void *from, unsigned long n)
918+___copy_to_user(void __user *to, const void *from, unsigned long n)
919 {
920 /*
921 * This test is stubbed out of the main function above to keep
922diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
923index 2b2d51c..0127490 100644
924--- a/arch/arm/mach-ux500/mbox-db5500.c
925+++ b/arch/arm/mach-ux500/mbox-db5500.c
926@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
927 return sprintf(buf, "0x%X\n", mbox_value);
928 }
929
930-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
931+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
932
933 static int mbox_show(struct seq_file *s, void *data)
934 {
935diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
936index 3b5ea68..42fc9af 100644
937--- a/arch/arm/mm/fault.c
938+++ b/arch/arm/mm/fault.c
939@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
940 }
941 #endif
942
943+#ifdef CONFIG_PAX_PAGEEXEC
944+ if (fsr & FSR_LNX_PF) {
945+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
946+ do_group_exit(SIGKILL);
947+ }
948+#endif
949+
950 tsk->thread.address = addr;
951 tsk->thread.error_code = fsr;
952 tsk->thread.trap_no = 14;
953@@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
954 }
955 #endif /* CONFIG_MMU */
956
957+#ifdef CONFIG_PAX_PAGEEXEC
958+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
959+{
960+ long i;
961+
962+ printk(KERN_ERR "PAX: bytes at PC: ");
963+ for (i = 0; i < 20; i++) {
964+ unsigned char c;
965+ if (get_user(c, (__force unsigned char __user *)pc+i))
966+ printk(KERN_CONT "?? ");
967+ else
968+ printk(KERN_CONT "%02x ", c);
969+ }
970+ printk("\n");
971+
972+ printk(KERN_ERR "PAX: bytes at SP-4: ");
973+ for (i = -1; i < 20; i++) {
974+ unsigned long c;
975+ if (get_user(c, (__force unsigned long __user *)sp+i))
976+ printk(KERN_CONT "???????? ");
977+ else
978+ printk(KERN_CONT "%08lx ", c);
979+ }
980+ printk("\n");
981+}
982+#endif
983+
984 /*
985 * First Level Translation Fault Handler
986 *
987diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
988index 74be05f..f605b8c 100644
989--- a/arch/arm/mm/mmap.c
990+++ b/arch/arm/mm/mmap.c
991@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
58c5fc13
MT
992 if (len > TASK_SIZE)
993 return -ENOMEM;
994
995+#ifdef CONFIG_PAX_RANDMMAP
996+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
997+#endif
998+
999 if (addr) {
1000 if (do_align)
1001 addr = COLOUR_ALIGN(addr, pgoff);
1002@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1003 addr = PAGE_ALIGN(addr);
1004
1005 vma = find_vma(mm, addr);
1006- if (TASK_SIZE - len >= addr &&
1007- (!vma || addr + len <= vma->vm_start))
1008+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1009 return addr;
1010 }
1011 if (len > mm->cached_hole_size) {
1012- start_addr = addr = mm->free_area_cache;
1013+ start_addr = addr = mm->free_area_cache;
1014 } else {
1015- start_addr = addr = TASK_UNMAPPED_BASE;
1016- mm->cached_hole_size = 0;
1017+ start_addr = addr = mm->mmap_base;
1018+ mm->cached_hole_size = 0;
1019 }
1020 /* 8 bits of randomness in 20 address space bits */
1021 if ((current->flags & PF_RANDOMIZE) &&
1022@@ -100,14 +103,14 @@ full_search:
1023 * Start a new search - just in case we missed
1024 * some holes.
1025 */
1026- if (start_addr != TASK_UNMAPPED_BASE) {
1027- start_addr = addr = TASK_UNMAPPED_BASE;
1028+ if (start_addr != mm->mmap_base) {
1029+ start_addr = addr = mm->mmap_base;
1030 mm->cached_hole_size = 0;
1031 goto full_search;
1032 }
57199397
MT
1033 return -ENOMEM;
1034 }
1035- if (!vma || addr + len <= vma->vm_start) {
1036+ if (check_heap_stack_gap(vma, addr, len)) {
1037 /*
1038 * Remember the place where we stopped the search:
1039 */
1040diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1041index 3b3159b..425ea94 100644
1042--- a/arch/avr32/include/asm/elf.h
1043+++ b/arch/avr32/include/asm/elf.h
1044@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1045 the loader. We need to make sure that it is out of the way of the program
1046 that it will "exec", and that there is sufficient room for the brk. */
1047
1048-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1049+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1050
1051+#ifdef CONFIG_PAX_ASLR
1052+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1053+
1054+#define PAX_DELTA_MMAP_LEN 15
1055+#define PAX_DELTA_STACK_LEN 15
1056+#endif
1057
1058 /* This yields a mask that user programs can use to figure out what
1059 instruction set this CPU supports. This could be done in user space,
1060diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1061index b7f5c68..556135c 100644
1062--- a/arch/avr32/include/asm/kmap_types.h
1063+++ b/arch/avr32/include/asm/kmap_types.h
1064@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1065 D(11) KM_IRQ1,
1066 D(12) KM_SOFTIRQ0,
1067 D(13) KM_SOFTIRQ1,
1068-D(14) KM_TYPE_NR
1069+D(14) KM_CLEARPAGE,
1070+D(15) KM_TYPE_NR
1071 };
1072
1073 #undef D
1074diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1075index f7040a1..db9f300 100644
1076--- a/arch/avr32/mm/fault.c
1077+++ b/arch/avr32/mm/fault.c
1078@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1079
1080 int exception_trace = 1;
1081
1082+#ifdef CONFIG_PAX_PAGEEXEC
1083+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1084+{
1085+ unsigned long i;
1086+
1087+ printk(KERN_ERR "PAX: bytes at PC: ");
1088+ for (i = 0; i < 20; i++) {
1089+ unsigned char c;
1090+ if (get_user(c, (unsigned char *)pc+i))
1091+ printk(KERN_CONT "???????? ");
1092+ else
1093+ printk(KERN_CONT "%02x ", c);
1094+ }
1095+ printk("\n");
1096+}
1097+#endif
1098+
1099 /*
1100 * This routine handles page faults. It determines the address and the
1101 * problem, and then passes it off to one of the appropriate routines.
1102@@ -156,6 +173,16 @@ bad_area:
1103 up_read(&mm->mmap_sem);
1104
1105 if (user_mode(regs)) {
1106+
1107+#ifdef CONFIG_PAX_PAGEEXEC
1108+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1109+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1110+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1111+ do_group_exit(SIGKILL);
1112+ }
1113+ }
1114+#endif
1115+
1116 if (exception_trace && printk_ratelimit())
1117 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1118 "sp %08lx ecr %lu\n",
1119diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1120index f8e16b2..c73ff79 100644
1121--- a/arch/frv/include/asm/kmap_types.h
1122+++ b/arch/frv/include/asm/kmap_types.h
1123@@ -23,6 +23,7 @@ enum km_type {
1124 KM_IRQ1,
1125 KM_SOFTIRQ0,
1126 KM_SOFTIRQ1,
1127+ KM_CLEARPAGE,
1128 KM_TYPE_NR
1129 };
1130
1131diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1132index 385fd30..6c3d97e 100644
1133--- a/arch/frv/mm/elf-fdpic.c
1134+++ b/arch/frv/mm/elf-fdpic.c
1135@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1136 if (addr) {
1137 addr = PAGE_ALIGN(addr);
1138 vma = find_vma(current->mm, addr);
1139- if (TASK_SIZE - len >= addr &&
1140- (!vma || addr + len <= vma->vm_start))
1141+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1142 goto success;
1143 }
1144
1145@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1146 for (; vma; vma = vma->vm_next) {
1147 if (addr > limit)
1148 break;
1149- if (addr + len <= vma->vm_start)
1150+ if (check_heap_stack_gap(vma, addr, len))
1151 goto success;
1152 addr = vma->vm_end;
1153 }
1154@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1155 for (; vma; vma = vma->vm_next) {
1156 if (addr > limit)
1157 break;
1158- if (addr + len <= vma->vm_start)
1159+ if (check_heap_stack_gap(vma, addr, len))
1160 goto success;
1161 addr = vma->vm_end;
1162 }
1163diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1164index b5298eb..67c6e62 100644
1165--- a/arch/ia64/include/asm/elf.h
1166+++ b/arch/ia64/include/asm/elf.h
1167@@ -42,6 +42,13 @@
1168 */
1169 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1170
1171+#ifdef CONFIG_PAX_ASLR
1172+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1173+
1174+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1175+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1176+#endif
1177+
1178 #define PT_IA_64_UNWIND 0x70000001
1179
1180 /* IA-64 relocations: */
1181diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1182index 1a97af3..7529d31 100644
1183--- a/arch/ia64/include/asm/pgtable.h
1184+++ b/arch/ia64/include/asm/pgtable.h
1185@@ -12,7 +12,7 @@
1186 * David Mosberger-Tang <davidm@hpl.hp.com>
1187 */
1188
1189-
1190+#include <linux/const.h>
1191 #include <asm/mman.h>
1192 #include <asm/page.h>
1193 #include <asm/processor.h>
1194@@ -143,6 +143,17 @@
1195 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1196 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1197 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1198+
1199+#ifdef CONFIG_PAX_PAGEEXEC
1200+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1201+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1202+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1203+#else
1204+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1205+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1206+# define PAGE_COPY_NOEXEC PAGE_COPY
1207+#endif
1208+
1209 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1210 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1211 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1212diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1213index b77768d..e0795eb 100644
1214--- a/arch/ia64/include/asm/spinlock.h
1215+++ b/arch/ia64/include/asm/spinlock.h
1216@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1217 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1218
1219 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1220- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1221+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1222 }
1223
1224 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1225diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1226index 449c8c0..432a3d2 100644
1227--- a/arch/ia64/include/asm/uaccess.h
1228+++ b/arch/ia64/include/asm/uaccess.h
1229@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1230 const void *__cu_from = (from); \
1231 long __cu_len = (n); \
1232 \
1233- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1234+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1235 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1236 __cu_len; \
1237 })
1238@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1239 long __cu_len = (n); \
1240 \
1241 __chk_user_ptr(__cu_from); \
1242- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1243+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1244 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1245 __cu_len; \
1246 })
1247diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1248index 24603be..948052d 100644
1249--- a/arch/ia64/kernel/module.c
1250+++ b/arch/ia64/kernel/module.c
1251@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1252 void
1253 module_free (struct module *mod, void *module_region)
1254 {
1255- if (mod && mod->arch.init_unw_table &&
1256- module_region == mod->module_init) {
1257+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1258 unw_remove_unwind_table(mod->arch.init_unw_table);
1259 mod->arch.init_unw_table = NULL;
1260 }
1261@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1262 }
1263
1264 static inline int
1265+in_init_rx (const struct module *mod, uint64_t addr)
1266+{
1267+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1268+}
1269+
1270+static inline int
1271+in_init_rw (const struct module *mod, uint64_t addr)
1272+{
1273+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1274+}
1275+
1276+static inline int
1277 in_init (const struct module *mod, uint64_t addr)
1278 {
1279- return addr - (uint64_t) mod->module_init < mod->init_size;
1280+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1281+}
1282+
1283+static inline int
1284+in_core_rx (const struct module *mod, uint64_t addr)
1285+{
1286+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1287+}
1288+
1289+static inline int
1290+in_core_rw (const struct module *mod, uint64_t addr)
1291+{
1292+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1293 }
1294
1295 static inline int
1296 in_core (const struct module *mod, uint64_t addr)
1297 {
1298- return addr - (uint64_t) mod->module_core < mod->core_size;
1299+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1300 }
1301
1302 static inline int
1303@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1304 break;
1305
1306 case RV_BDREL:
1307- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1308+ if (in_init_rx(mod, val))
1309+ val -= (uint64_t) mod->module_init_rx;
1310+ else if (in_init_rw(mod, val))
1311+ val -= (uint64_t) mod->module_init_rw;
1312+ else if (in_core_rx(mod, val))
1313+ val -= (uint64_t) mod->module_core_rx;
1314+ else if (in_core_rw(mod, val))
1315+ val -= (uint64_t) mod->module_core_rw;
1316 break;
1317
1318 case RV_LTV:
1319@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1320 * addresses have been selected...
1321 */
1322 uint64_t gp;
1323- if (mod->core_size > MAX_LTOFF)
1324+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1325 /*
1326 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1327 * at the end of the module.
1328 */
1329- gp = mod->core_size - MAX_LTOFF / 2;
1330+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1331 else
1332- gp = mod->core_size / 2;
1333- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1334+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1335+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1336 mod->arch.gp = gp;
1337 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1338 }
1339diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1340index 609d500..7dde2a8 100644
1341--- a/arch/ia64/kernel/sys_ia64.c
1342+++ b/arch/ia64/kernel/sys_ia64.c
1343@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1344 if (REGION_NUMBER(addr) == RGN_HPAGE)
1345 addr = 0;
1346 #endif
1347+
1348+#ifdef CONFIG_PAX_RANDMMAP
1349+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1350+ addr = mm->free_area_cache;
1351+ else
1352+#endif
1353+
1354 if (!addr)
1355 addr = mm->free_area_cache;
1356
1357@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1358 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1359 /* At this point: (!vma || addr < vma->vm_end). */
1360 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1361- if (start_addr != TASK_UNMAPPED_BASE) {
1362+ if (start_addr != mm->mmap_base) {
1363 /* Start a new search --- just in case we missed some holes. */
1364- addr = TASK_UNMAPPED_BASE;
1365+ addr = mm->mmap_base;
1366 goto full_search;
1367 }
1368 return -ENOMEM;
1369 }
1370- if (!vma || addr + len <= vma->vm_start) {
1371+ if (check_heap_stack_gap(vma, addr, len)) {
1372 /* Remember the address where we stopped this search: */
1373 mm->free_area_cache = addr + len;
1374 return addr;
1375diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1376index 53c0ba0..2accdde 100644
1377--- a/arch/ia64/kernel/vmlinux.lds.S
1378+++ b/arch/ia64/kernel/vmlinux.lds.S
1379@@ -199,7 +199,7 @@ SECTIONS {
1380 /* Per-cpu data: */
1381 . = ALIGN(PERCPU_PAGE_SIZE);
1382 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1383- __phys_per_cpu_start = __per_cpu_load;
1384+ __phys_per_cpu_start = per_cpu_load;
1385 /*
1386 * ensure percpu data fits
1387 * into percpu page size
1388diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1389index 20b3593..1ce77f0 100644
1390--- a/arch/ia64/mm/fault.c
1391+++ b/arch/ia64/mm/fault.c
1392@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1393 return pte_present(pte);
1394 }
1395
1396+#ifdef CONFIG_PAX_PAGEEXEC
1397+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1398+{
1399+ unsigned long i;
1400+
1401+ printk(KERN_ERR "PAX: bytes at PC: ");
1402+ for (i = 0; i < 8; i++) {
1403+ unsigned int c;
1404+ if (get_user(c, (unsigned int *)pc+i))
1405+ printk(KERN_CONT "???????? ");
1406+ else
1407+ printk(KERN_CONT "%08x ", c);
1408+ }
1409+ printk("\n");
1410+}
1411+#endif
1412+
1413 void __kprobes
1414 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1415 {
1416@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1417 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1418 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1419
1420- if ((vma->vm_flags & mask) != mask)
1421+ if ((vma->vm_flags & mask) != mask) {
1422+
1423+#ifdef CONFIG_PAX_PAGEEXEC
1424+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1425+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1426+ goto bad_area;
1427+
1428+ up_read(&mm->mmap_sem);
1429+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1430+ do_group_exit(SIGKILL);
1431+ }
1432+#endif
1433+
1434 goto bad_area;
1435
1436+ }
1437+
1438 /*
1439 * If for any reason at all we couldn't handle the fault, make
1440 * sure we exit gracefully rather than endlessly redo the
1441diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1442index 5ca674b..e0e1b70 100644
1443--- a/arch/ia64/mm/hugetlbpage.c
1444+++ b/arch/ia64/mm/hugetlbpage.c
1445@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1446 /* At this point: (!vmm || addr < vmm->vm_end). */
1447 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1448 return -ENOMEM;
1449- if (!vmm || (addr + len) <= vmm->vm_start)
1450+ if (check_heap_stack_gap(vmm, addr, len))
1451 return addr;
1452 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1453 }
1454diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1455index 00cb0e2..2ad8024 100644
1456--- a/arch/ia64/mm/init.c
1457+++ b/arch/ia64/mm/init.c
1458@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1459 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1460 vma->vm_end = vma->vm_start + PAGE_SIZE;
1461 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1462+
1463+#ifdef CONFIG_PAX_PAGEEXEC
1464+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1465+ vma->vm_flags &= ~VM_EXEC;
1466+
1467+#ifdef CONFIG_PAX_MPROTECT
1468+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1469+ vma->vm_flags &= ~VM_MAYEXEC;
1470+#endif
1471+
1472+ }
1473+#endif
1474+
1475 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1476 down_write(&current->mm->mmap_sem);
1477 if (insert_vm_struct(current->mm, vma)) {
1478diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1479index 82abd15..d95ae5d 100644
1480--- a/arch/m32r/lib/usercopy.c
1481+++ b/arch/m32r/lib/usercopy.c
1482@@ -14,6 +14,9 @@
1483 unsigned long
1484 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1485 {
1486+ if ((long)n < 0)
1487+ return n;
1488+
1489 prefetch(from);
1490 if (access_ok(VERIFY_WRITE, to, n))
1491 __copy_user(to,from,n);
1492@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1493 unsigned long
1494 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1495 {
1496+ if ((long)n < 0)
1497+ return n;
1498+
1499 prefetchw(to);
1500 if (access_ok(VERIFY_READ, from, n))
1501 __copy_user_zeroing(to,from,n);
1502diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1503index 455c0ac..ad65fbe 100644
1504--- a/arch/mips/include/asm/elf.h
1505+++ b/arch/mips/include/asm/elf.h
1506@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1507 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1508 #endif
1509
1510+#ifdef CONFIG_PAX_ASLR
1511+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1512+
1513+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1514+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1515+#endif
1516+
1517 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1518 struct linux_binprm;
1519 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1520 int uses_interp);
1521
1522-struct mm_struct;
1523-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1524-#define arch_randomize_brk arch_randomize_brk
1525-
1526 #endif /* _ASM_ELF_H */
1527diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1528index e59cd1a..8e329d6 100644
1529--- a/arch/mips/include/asm/page.h
1530+++ b/arch/mips/include/asm/page.h
1531@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1532 #ifdef CONFIG_CPU_MIPS32
1533 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1534 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1535- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1536+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1537 #else
1538 typedef struct { unsigned long long pte; } pte_t;
1539 #define pte_val(x) ((x).pte)
1540diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1541index 6018c80..7c37203 100644
1542--- a/arch/mips/include/asm/system.h
1543+++ b/arch/mips/include/asm/system.h
1544@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1545 */
1546 #define __ARCH_WANT_UNLOCKED_CTXSW
1547
1548-extern unsigned long arch_align_stack(unsigned long sp);
1549+#define arch_align_stack(x) ((x) & ~0xfUL)
1550
1551 #endif /* _ASM_SYSTEM_H */
1552diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1553index 9fdd8bc..4bd7f1a 100644
1554--- a/arch/mips/kernel/binfmt_elfn32.c
1555+++ b/arch/mips/kernel/binfmt_elfn32.c
1556@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1557 #undef ELF_ET_DYN_BASE
1558 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1559
1560+#ifdef CONFIG_PAX_ASLR
1561+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1562+
1563+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1564+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1565+#endif
1566+
1567 #include <asm/processor.h>
1568 #include <linux/module.h>
1569 #include <linux/elfcore.h>
1570diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1571index ff44823..97f8906 100644
1572--- a/arch/mips/kernel/binfmt_elfo32.c
1573+++ b/arch/mips/kernel/binfmt_elfo32.c
1574@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1575 #undef ELF_ET_DYN_BASE
1576 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1577
1578+#ifdef CONFIG_PAX_ASLR
1579+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1580+
1581+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1582+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1583+#endif
1584+
1585 #include <asm/processor.h>
1586
1587 /*
1588diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1589index b30cb25..454c0a9 100644
1590--- a/arch/mips/kernel/process.c
1591+++ b/arch/mips/kernel/process.c
1592@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1593 out:
1594 return pc;
1595 }
1596-
1597-/*
1598- * Don't forget that the stack pointer must be aligned on a 8 bytes
1599- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1600- */
1601-unsigned long arch_align_stack(unsigned long sp)
1602-{
1603- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1604- sp -= get_random_int() & ~PAGE_MASK;
1605-
1606- return sp & ALMASK;
1607-}
1608diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1609index 937cf33..adb39bb 100644
1610--- a/arch/mips/mm/fault.c
1611+++ b/arch/mips/mm/fault.c
1612@@ -28,6 +28,23 @@
1613 #include <asm/highmem.h> /* For VMALLOC_END */
1614 #include <linux/kdebug.h>
1615
1616+#ifdef CONFIG_PAX_PAGEEXEC
1617+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1618+{
1619+ unsigned long i;
1620+
1621+ printk(KERN_ERR "PAX: bytes at PC: ");
1622+ for (i = 0; i < 5; i++) {
1623+ unsigned int c;
1624+ if (get_user(c, (unsigned int *)pc+i))
1625+ printk(KERN_CONT "???????? ");
1626+ else
1627+ printk(KERN_CONT "%08x ", c);
1628+ }
1629+ printk("\n");
1630+}
1631+#endif
1632+
1633 /*
1634 * This routine handles page faults. It determines the address,
1635 * and the problem, and then passes it off to one of the appropriate
1636diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1637index 302d779..7d35bf8 100644
1638--- a/arch/mips/mm/mmap.c
1639+++ b/arch/mips/mm/mmap.c
1640@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1641 do_color_align = 1;
1642
1643 /* requesting a specific address */
1644+
1645+#ifdef CONFIG_PAX_RANDMMAP
1646+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1647+#endif
1648+
1649 if (addr) {
1650 if (do_color_align)
1651 addr = COLOUR_ALIGN(addr, pgoff);
1652@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1653 addr = PAGE_ALIGN(addr);
1654
1655 vma = find_vma(mm, addr);
1656- if (TASK_SIZE - len >= addr &&
1657- (!vma || addr + len <= vma->vm_start))
1658+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1659 return addr;
1660 }
1661
1662@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1663 /* At this point: (!vma || addr < vma->vm_end). */
1664 if (TASK_SIZE - len < addr)
1665 return -ENOMEM;
1666- if (!vma || addr + len <= vma->vm_start)
1667+ if (check_heap_stack_gap(vmm, addr, len))
1668 return addr;
1669 addr = vma->vm_end;
1670 if (do_color_align)
1671@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1672 /* make sure it can fit in the remaining address space */
1673 if (likely(addr > len)) {
1674 vma = find_vma(mm, addr - len);
1675- if (!vma || addr <= vma->vm_start) {
1676+ if (check_heap_stack_gap(vmm, addr - len, len))
1677 /* cache the address as a hint for next time */
1678 return mm->free_area_cache = addr - len;
1679 }
1680@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1681 * return with success:
1682 */
1683 vma = find_vma(mm, addr);
1684- if (likely(!vma || addr + len <= vma->vm_start)) {
1685+ if (check_heap_stack_gap(vmm, addr, len)) {
1686 /* cache the address as a hint for next time */
1687 return mm->free_area_cache = addr;
1688 }
1689@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1690 mm->unmap_area = arch_unmap_area_topdown;
1691 }
1692 }
1693-
1694-static inline unsigned long brk_rnd(void)
1695-{
1696- unsigned long rnd = get_random_int();
1697-
1698- rnd = rnd << PAGE_SHIFT;
1699- /* 8MB for 32bit, 256MB for 64bit */
1700- if (TASK_IS_32BIT_ADDR)
1701- rnd = rnd & 0x7ffffful;
1702- else
1703- rnd = rnd & 0xffffffful;
1704-
1705- return rnd;
1706-}
1707-
1708-unsigned long arch_randomize_brk(struct mm_struct *mm)
1709-{
1710- unsigned long base = mm->brk;
1711- unsigned long ret;
1712-
1713- ret = PAGE_ALIGN(base + brk_rnd());
1714-
1715- if (ret < mm->brk)
1716- return mm->brk;
1717-
1718- return ret;
1719-}
1720diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1721index 19f6cb1..6c78cf2 100644
1722--- a/arch/parisc/include/asm/elf.h
1723+++ b/arch/parisc/include/asm/elf.h
1724@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1725
1726 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1727
1728+#ifdef CONFIG_PAX_ASLR
1729+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1730+
1731+#define PAX_DELTA_MMAP_LEN 16
1732+#define PAX_DELTA_STACK_LEN 16
1733+#endif
1734+
1735 /* This yields a mask that user programs can use to figure out what
1736 instruction set this CPU supports. This could be done in user space,
1737 but it's not easy, and we've already done it here. */
1738diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1739index 22dadeb..f6c2be4 100644
1740--- a/arch/parisc/include/asm/pgtable.h
1741+++ b/arch/parisc/include/asm/pgtable.h
1742@@ -210,6 +210,17 @@ struct vm_area_struct;
1743 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1744 #define PAGE_COPY PAGE_EXECREAD
1745 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1746+
1747+#ifdef CONFIG_PAX_PAGEEXEC
1748+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1749+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1750+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1751+#else
1752+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1753+# define PAGE_COPY_NOEXEC PAGE_COPY
1754+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1755+#endif
1756+
1757 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1758 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1759 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1760diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1761index 5e34ccf..672bc9c 100644
1762--- a/arch/parisc/kernel/module.c
1763+++ b/arch/parisc/kernel/module.c
1764@@ -98,16 +98,38 @@
1765
1766 /* three functions to determine where in the module core
1767 * or init pieces the location is */
1768+static inline int in_init_rx(struct module *me, void *loc)
1769+{
1770+ return (loc >= me->module_init_rx &&
1771+ loc < (me->module_init_rx + me->init_size_rx));
1772+}
1773+
1774+static inline int in_init_rw(struct module *me, void *loc)
1775+{
1776+ return (loc >= me->module_init_rw &&
1777+ loc < (me->module_init_rw + me->init_size_rw));
1778+}
1779+
1780 static inline int in_init(struct module *me, void *loc)
1781 {
1782- return (loc >= me->module_init &&
1783- loc <= (me->module_init + me->init_size));
1784+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1785+}
1786+
1787+static inline int in_core_rx(struct module *me, void *loc)
1788+{
1789+ return (loc >= me->module_core_rx &&
1790+ loc < (me->module_core_rx + me->core_size_rx));
1791+}
1792+
1793+static inline int in_core_rw(struct module *me, void *loc)
1794+{
1795+ return (loc >= me->module_core_rw &&
1796+ loc < (me->module_core_rw + me->core_size_rw));
1797 }
1798
1799 static inline int in_core(struct module *me, void *loc)
1800 {
1801- return (loc >= me->module_core &&
1802- loc <= (me->module_core + me->core_size));
1803+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1804 }
1805
1806 static inline int in_local(struct module *me, void *loc)
1807@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1808 }
1809
1810 /* align things a bit */
1811- me->core_size = ALIGN(me->core_size, 16);
1812- me->arch.got_offset = me->core_size;
1813- me->core_size += gots * sizeof(struct got_entry);
1814+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1815+ me->arch.got_offset = me->core_size_rw;
1816+ me->core_size_rw += gots * sizeof(struct got_entry);
1817
1818- me->core_size = ALIGN(me->core_size, 16);
1819- me->arch.fdesc_offset = me->core_size;
1820- me->core_size += fdescs * sizeof(Elf_Fdesc);
1821+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1822+ me->arch.fdesc_offset = me->core_size_rw;
1823+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1824
1825 me->arch.got_max = gots;
1826 me->arch.fdesc_max = fdescs;
1827@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1828
1829 BUG_ON(value == 0);
1830
1831- got = me->module_core + me->arch.got_offset;
1832+ got = me->module_core_rw + me->arch.got_offset;
1833 for (i = 0; got[i].addr; i++)
1834 if (got[i].addr == value)
1835 goto out;
1836@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1837 #ifdef CONFIG_64BIT
1838 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1839 {
1840- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1841+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1842
1843 if (!value) {
1844 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1845@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1846
1847 /* Create new one */
1848 fdesc->addr = value;
1849- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1850+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1851 return (Elf_Addr)fdesc;
1852 }
1853 #endif /* CONFIG_64BIT */
1854@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1855
1856 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1857 end = table + sechdrs[me->arch.unwind_section].sh_size;
1858- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1859+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1860
1861 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1862 me->arch.unwind_section, table, end, gp);
1863diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1864index c9b9322..02d8940 100644
1865--- a/arch/parisc/kernel/sys_parisc.c
1866+++ b/arch/parisc/kernel/sys_parisc.c
1867@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1868 /* At this point: (!vma || addr < vma->vm_end). */
1869 if (TASK_SIZE - len < addr)
1870 return -ENOMEM;
1871- if (!vma || addr + len <= vma->vm_start)
1872+ if (check_heap_stack_gap(vma, addr, len))
1873 return addr;
1874 addr = vma->vm_end;
1875 }
1876@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1877 /* At this point: (!vma || addr < vma->vm_end). */
1878 if (TASK_SIZE - len < addr)
1879 return -ENOMEM;
1880- if (!vma || addr + len <= vma->vm_start)
1881+ if (check_heap_stack_gap(vma, addr, len))
1882 return addr;
1883 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1884 if (addr < vma->vm_end) /* handle wraparound */
1885@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1886 if (flags & MAP_FIXED)
1887 return addr;
1888 if (!addr)
1889- addr = TASK_UNMAPPED_BASE;
1890+ addr = current->mm->mmap_base;
1891
1892 if (filp) {
1893 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1894diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1895index f19e660..414fe24 100644
1896--- a/arch/parisc/kernel/traps.c
1897+++ b/arch/parisc/kernel/traps.c
1898@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
58c5fc13
MT
1899
1900 down_read(&current->mm->mmap_sem);
1901 vma = find_vma(current->mm,regs->iaoq[0]);
1902- if (vma && (regs->iaoq[0] >= vma->vm_start)
1903- && (vma->vm_flags & VM_EXEC)) {
1904-
1905+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1906 fault_address = regs->iaoq[0];
1907 fault_space = regs->iasq[0];
1908
1909diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1910index 18162ce..94de376 100644
1911--- a/arch/parisc/mm/fault.c
1912+++ b/arch/parisc/mm/fault.c
1913@@ -15,6 +15,7 @@
1914 #include <linux/sched.h>
1915 #include <linux/interrupt.h>
1916 #include <linux/module.h>
1917+#include <linux/unistd.h>
1918
1919 #include <asm/uaccess.h>
1920 #include <asm/traps.h>
1921@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1922 static unsigned long
1923 parisc_acctyp(unsigned long code, unsigned int inst)
1924 {
1925- if (code == 6 || code == 16)
1926+ if (code == 6 || code == 7 || code == 16)
1927 return VM_EXEC;
1928
1929 switch (inst & 0xf0000000) {
1930@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1931 }
1932 #endif
1933
1934+#ifdef CONFIG_PAX_PAGEEXEC
1935+/*
1936+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1937+ *
1938+ * returns 1 when task should be killed
1939+ * 2 when rt_sigreturn trampoline was detected
1940+ * 3 when unpatched PLT trampoline was detected
1941+ */
1942+static int pax_handle_fetch_fault(struct pt_regs *regs)
1943+{
1944+
1945+#ifdef CONFIG_PAX_EMUPLT
1946+ int err;
1947+
1948+ do { /* PaX: unpatched PLT emulation */
1949+ unsigned int bl, depwi;
1950+
1951+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1952+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1953+
1954+ if (err)
1955+ break;
1956+
1957+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1958+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1959+
1960+ err = get_user(ldw, (unsigned int *)addr);
1961+ err |= get_user(bv, (unsigned int *)(addr+4));
1962+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1963+
1964+ if (err)
1965+ break;
1966+
1967+ if (ldw == 0x0E801096U &&
1968+ bv == 0xEAC0C000U &&
1969+ ldw2 == 0x0E881095U)
1970+ {
1971+ unsigned int resolver, map;
1972+
1973+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1974+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1975+ if (err)
1976+ break;
1977+
1978+ regs->gr[20] = instruction_pointer(regs)+8;
1979+ regs->gr[21] = map;
1980+ regs->gr[22] = resolver;
1981+ regs->iaoq[0] = resolver | 3UL;
1982+ regs->iaoq[1] = regs->iaoq[0] + 4;
1983+ return 3;
1984+ }
1985+ }
1986+ } while (0);
1987+#endif
1988+
1989+#ifdef CONFIG_PAX_EMUTRAMP
1990+
1991+#ifndef CONFIG_PAX_EMUSIGRT
1992+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1993+ return 1;
1994+#endif
1995+
1996+ do { /* PaX: rt_sigreturn emulation */
1997+ unsigned int ldi1, ldi2, bel, nop;
1998+
1999+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2000+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2001+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2002+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2003+
2004+ if (err)
2005+ break;
2006+
2007+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2008+ ldi2 == 0x3414015AU &&
2009+ bel == 0xE4008200U &&
2010+ nop == 0x08000240U)
2011+ {
2012+ regs->gr[25] = (ldi1 & 2) >> 1;
2013+ regs->gr[20] = __NR_rt_sigreturn;
2014+ regs->gr[31] = regs->iaoq[1] + 16;
2015+ regs->sr[0] = regs->iasq[1];
2016+ regs->iaoq[0] = 0x100UL;
2017+ regs->iaoq[1] = regs->iaoq[0] + 4;
2018+ regs->iasq[0] = regs->sr[2];
2019+ regs->iasq[1] = regs->sr[2];
2020+ return 2;
2021+ }
2022+ } while (0);
2023+#endif
2024+
2025+ return 1;
2026+}
2027+
2028+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2029+{
2030+ unsigned long i;
2031+
2032+ printk(KERN_ERR "PAX: bytes at PC: ");
2033+ for (i = 0; i < 5; i++) {
2034+ unsigned int c;
2035+ if (get_user(c, (unsigned int *)pc+i))
2036+ printk(KERN_CONT "???????? ");
2037+ else
2038+ printk(KERN_CONT "%08x ", c);
2039+ }
2040+ printk("\n");
2041+}
2042+#endif
2043+
2044 int fixup_exception(struct pt_regs *regs)
2045 {
2046 const struct exception_table_entry *fix;
2047@@ -192,8 +303,33 @@ good_area:
2048
2049 acc_type = parisc_acctyp(code,regs->iir);
2050
2051- if ((vma->vm_flags & acc_type) != acc_type)
2052+ if ((vma->vm_flags & acc_type) != acc_type) {
2053+
2054+#ifdef CONFIG_PAX_PAGEEXEC
2055+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2056+ (address & ~3UL) == instruction_pointer(regs))
2057+ {
2058+ up_read(&mm->mmap_sem);
2059+ switch (pax_handle_fetch_fault(regs)) {
2060+
2061+#ifdef CONFIG_PAX_EMUPLT
2062+ case 3:
2063+ return;
2064+#endif
2065+
2066+#ifdef CONFIG_PAX_EMUTRAMP
2067+ case 2:
2068+ return;
2069+#endif
2070+
2071+ }
2072+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2073+ do_group_exit(SIGKILL);
2074+ }
2075+#endif
2076+
2077 goto bad_area;
2078+ }
2079
2080 /*
2081 * If for any reason at all we couldn't handle the fault, make
2082diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2083index 3bf9cca..e7457d0 100644
2084--- a/arch/powerpc/include/asm/elf.h
2085+++ b/arch/powerpc/include/asm/elf.h
2086@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2087 the loader. We need to make sure that it is out of the way of the program
2088 that it will "exec", and that there is sufficient room for the brk. */
2089
2090-extern unsigned long randomize_et_dyn(unsigned long base);
2091-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2092+#define ELF_ET_DYN_BASE (0x20000000)
2093+
2094+#ifdef CONFIG_PAX_ASLR
2095+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2096+
2097+#ifdef __powerpc64__
2098+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2099+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2100+#else
2101+#define PAX_DELTA_MMAP_LEN 15
2102+#define PAX_DELTA_STACK_LEN 15
2103+#endif
2104+#endif
2105
2106 /*
2107 * Our registers are always unsigned longs, whether we're a 32 bit
2108@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2109 (0x7ff >> (PAGE_SHIFT - 12)) : \
2110 (0x3ffff >> (PAGE_SHIFT - 12)))
2111
2112-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2113-#define arch_randomize_brk arch_randomize_brk
2114-
2115 #endif /* __KERNEL__ */
2116
2117 /*
2118diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2119index bca8fdc..61e9580 100644
2120--- a/arch/powerpc/include/asm/kmap_types.h
2121+++ b/arch/powerpc/include/asm/kmap_types.h
2122@@ -27,6 +27,7 @@ enum km_type {
2123 KM_PPC_SYNC_PAGE,
2124 KM_PPC_SYNC_ICACHE,
2125 KM_KDB,
2126+ KM_CLEARPAGE,
2127 KM_TYPE_NR
2128 };
2129
2130diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2131index d4a7f64..451de1c 100644
2132--- a/arch/powerpc/include/asm/mman.h
2133+++ b/arch/powerpc/include/asm/mman.h
2134@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2135 }
2136 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2137
2138-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2139+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2140 {
2141 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2142 }
2143diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2144index 2cd664e..1d2e8a7 100644
2145--- a/arch/powerpc/include/asm/page.h
2146+++ b/arch/powerpc/include/asm/page.h
2147@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
2148 * and needs to be executable. This means the whole heap ends
2149 * up being executable.
2150 */
2151-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2152- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2153+#define VM_DATA_DEFAULT_FLAGS32 \
2154+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2155+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156
2157 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2158 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
2160 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2161 #endif
2162
2163+#define ktla_ktva(addr) (addr)
2164+#define ktva_ktla(addr) (addr)
2165+
2166 #ifndef __ASSEMBLY__
2167
2168 #undef STRICT_MM_TYPECHECKS
2169diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2170index 9356262..ea96148 100644
2171--- a/arch/powerpc/include/asm/page_64.h
2172+++ b/arch/powerpc/include/asm/page_64.h
2173@@ -155,15 +155,18 @@ do { \
2174 * stack by default, so in the absence of a PT_GNU_STACK program header
2175 * we turn execute permission off.
2176 */
2177-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2178- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2179+#define VM_STACK_DEFAULT_FLAGS32 \
2180+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2181+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182
2183 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2184 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186+#ifndef CONFIG_PAX_PAGEEXEC
2187 #define VM_STACK_DEFAULT_FLAGS \
2188 (is_32bit_task() ? \
2189 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2190+#endif
2191
2192 #include <asm-generic/getorder.h>
2193
2194diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2195index 88b0bd9..e32bc67 100644
2196--- a/arch/powerpc/include/asm/pgtable.h
2197+++ b/arch/powerpc/include/asm/pgtable.h
2198@@ -2,6 +2,7 @@
2199 #define _ASM_POWERPC_PGTABLE_H
2200 #ifdef __KERNEL__
2201
2202+#include <linux/const.h>
2203 #ifndef __ASSEMBLY__
2204 #include <asm/processor.h> /* For TASK_SIZE */
2205 #include <asm/mmu.h>
2206diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2207index 4aad413..85d86bf 100644
2208--- a/arch/powerpc/include/asm/pte-hash32.h
2209+++ b/arch/powerpc/include/asm/pte-hash32.h
2210@@ -21,6 +21,7 @@
2211 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2212 #define _PAGE_USER 0x004 /* usermode access allowed */
2213 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2214+#define _PAGE_EXEC _PAGE_GUARDED
2215 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2216 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2217 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2218diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2219index 559da19..7e5835c 100644
2220--- a/arch/powerpc/include/asm/reg.h
2221+++ b/arch/powerpc/include/asm/reg.h
2222@@ -212,6 +212,7 @@
2223 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2224 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2225 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2226+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2227 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2228 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2229 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2230diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2231index e30a13d..2b7d994 100644
2232--- a/arch/powerpc/include/asm/system.h
2233+++ b/arch/powerpc/include/asm/system.h
2234@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2235 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2236 #endif
2237
2238-extern unsigned long arch_align_stack(unsigned long sp);
2239+#define arch_align_stack(x) ((x) & ~0xfUL)
2240
2241 /* Used in very early kernel initialization. */
2242 extern unsigned long reloc_offset(void);
2243diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2244index bd0fb84..a42a14b 100644
2245--- a/arch/powerpc/include/asm/uaccess.h
2246+++ b/arch/powerpc/include/asm/uaccess.h
2247@@ -13,6 +13,8 @@
2248 #define VERIFY_READ 0
2249 #define VERIFY_WRITE 1
2250
2251+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2252+
2253 /*
2254 * The fs value determines whether argument validity checking should be
2255 * performed or not. If get_fs() == USER_DS, checking is performed, with
2256@@ -327,52 +329,6 @@ do { \
58c5fc13
MT
2257 extern unsigned long __copy_tofrom_user(void __user *to,
2258 const void __user *from, unsigned long size);
2259
2260-#ifndef __powerpc64__
2261-
2262-static inline unsigned long copy_from_user(void *to,
2263- const void __user *from, unsigned long n)
2264-{
2265- unsigned long over;
2266-
2267- if (access_ok(VERIFY_READ, from, n))
2268- return __copy_tofrom_user((__force void __user *)to, from, n);
2269- if ((unsigned long)from < TASK_SIZE) {
2270- over = (unsigned long)from + n - TASK_SIZE;
2271- return __copy_tofrom_user((__force void __user *)to, from,
2272- n - over) + over;
2273- }
2274- return n;
2275-}
2276-
2277-static inline unsigned long copy_to_user(void __user *to,
2278- const void *from, unsigned long n)
2279-{
2280- unsigned long over;
2281-
2282- if (access_ok(VERIFY_WRITE, to, n))
2283- return __copy_tofrom_user(to, (__force void __user *)from, n);
2284- if ((unsigned long)to < TASK_SIZE) {
2285- over = (unsigned long)to + n - TASK_SIZE;
2286- return __copy_tofrom_user(to, (__force void __user *)from,
2287- n - over) + over;
2288- }
2289- return n;
2290-}
2291-
2292-#else /* __powerpc64__ */
2293-
2294-#define __copy_in_user(to, from, size) \
2295- __copy_tofrom_user((to), (from), (size))
2296-
2297-extern unsigned long copy_from_user(void *to, const void __user *from,
2298- unsigned long n);
2299-extern unsigned long copy_to_user(void __user *to, const void *from,
2300- unsigned long n);
2301-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2302- unsigned long n);
2303-
2304-#endif /* __powerpc64__ */
2305-
2306 static inline unsigned long __copy_from_user_inatomic(void *to,
2307 const void __user *from, unsigned long n)
2308 {
2309@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2310 if (ret == 0)
2311 return 0;
2312 }
2313+
2314+ if (!__builtin_constant_p(n))
2315+ check_object_size(to, n, false);
2316+
2317 return __copy_tofrom_user((__force void __user *)to, from, n);
2318 }
2319
2320@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2321 if (ret == 0)
2322 return 0;
2323 }
2324+
2325+ if (!__builtin_constant_p(n))
2326+ check_object_size(from, n, true);
2327+
2328 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2329 }
2330
2331@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2332 return __copy_to_user_inatomic(to, from, size);
2333 }
2334
2335+#ifndef __powerpc64__
2336+
2337+static inline unsigned long __must_check copy_from_user(void *to,
2338+ const void __user *from, unsigned long n)
2339+{
2340+ unsigned long over;
2341+
2342+ if ((long)n < 0)
2343+ return n;
2344+
2345+ if (access_ok(VERIFY_READ, from, n)) {
2346+ if (!__builtin_constant_p(n))
2347+ check_object_size(to, n, false);
2348+ return __copy_tofrom_user((__force void __user *)to, from, n);
2349+ }
2350+ if ((unsigned long)from < TASK_SIZE) {
2351+ over = (unsigned long)from + n - TASK_SIZE;
2352+ if (!__builtin_constant_p(n - over))
2353+ check_object_size(to, n - over, false);
2354+ return __copy_tofrom_user((__force void __user *)to, from,
2355+ n - over) + over;
2356+ }
2357+ return n;
2358+}
2359+
2360+static inline unsigned long __must_check copy_to_user(void __user *to,
2361+ const void *from, unsigned long n)
2362+{
2363+ unsigned long over;
2364+
2365+ if ((long)n < 0)
2366+ return n;
2367+
2368+ if (access_ok(VERIFY_WRITE, to, n)) {
2369+ if (!__builtin_constant_p(n))
2370+ check_object_size(from, n, true);
2371+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2372+ }
2373+ if ((unsigned long)to < TASK_SIZE) {
2374+ over = (unsigned long)to + n - TASK_SIZE;
2375+ if (!__builtin_constant_p(n))
2376+ check_object_size(from, n - over, true);
2377+ return __copy_tofrom_user(to, (__force void __user *)from,
2378+ n - over) + over;
2379+ }
2380+ return n;
2381+}
2382+
2383+#else /* __powerpc64__ */
2384+
2385+#define __copy_in_user(to, from, size) \
2386+ __copy_tofrom_user((to), (from), (size))
2387+
2388+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2389+{
2390+ if ((long)n < 0 || n > INT_MAX)
2391+ return n;
2392+
2393+ if (!__builtin_constant_p(n))
2394+ check_object_size(to, n, false);
2395+
2396+ if (likely(access_ok(VERIFY_READ, from, n)))
2397+ n = __copy_from_user(to, from, n);
2398+ else
2399+ memset(to, 0, n);
2400+ return n;
2401+}
2402+
2403+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2404+{
2405+ if ((long)n < 0 || n > INT_MAX)
2406+ return n;
2407+
2408+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2409+ if (!__builtin_constant_p(n))
2410+ check_object_size(from, n, true);
2411+ n = __copy_to_user(to, from, n);
2412+ }
2413+ return n;
2414+}
2415+
2416+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2417+ unsigned long n);
2418+
2419+#endif /* __powerpc64__ */
2420+
2421 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2422
2423 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2424diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2425index 429983c..7af363b 100644
2426--- a/arch/powerpc/kernel/exceptions-64e.S
2427+++ b/arch/powerpc/kernel/exceptions-64e.S
2428@@ -587,6 +587,7 @@ storage_fault_common:
2429 std r14,_DAR(r1)
2430 std r15,_DSISR(r1)
2431 addi r3,r1,STACK_FRAME_OVERHEAD
2432+ bl .save_nvgprs
2433 mr r4,r14
2434 mr r5,r15
2435 ld r14,PACA_EXGEN+EX_R14(r13)
2436@@ -596,8 +597,7 @@ storage_fault_common:
2437 cmpdi r3,0
2438 bne- 1f
2439 b .ret_from_except_lite
2440-1: bl .save_nvgprs
2441- mr r5,r3
2442+1: mr r5,r3
2443 addi r3,r1,STACK_FRAME_OVERHEAD
2444 ld r4,_DAR(r1)
2445 bl .bad_page_fault
2446diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2447index 41b02c7..05e76fb 100644
2448--- a/arch/powerpc/kernel/exceptions-64s.S
2449+++ b/arch/powerpc/kernel/exceptions-64s.S
2450@@ -1014,10 +1014,10 @@ handle_page_fault:
2451 11: ld r4,_DAR(r1)
2452 ld r5,_DSISR(r1)
2453 addi r3,r1,STACK_FRAME_OVERHEAD
2454+ bl .save_nvgprs
2455 bl .do_page_fault
2456 cmpdi r3,0
2457 beq+ 13f
2458- bl .save_nvgprs
2459 mr r5,r3
2460 addi r3,r1,STACK_FRAME_OVERHEAD
2461 lwz r4,_DAR(r1)
2462diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2463index 0b6d796..d760ddb 100644
2464--- a/arch/powerpc/kernel/module_32.c
2465+++ b/arch/powerpc/kernel/module_32.c
2466@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2467 me->arch.core_plt_section = i;
2468 }
2469 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2470- printk("Module doesn't contain .plt or .init.plt sections.\n");
2471+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2472 return -ENOEXEC;
2473 }
2474
2475@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2476
2477 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2478 /* Init, or core PLT? */
2479- if (location >= mod->module_core
2480- && location < mod->module_core + mod->core_size)
2481+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2482+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2483 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2484- else
2485+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2486+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2487 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2488+ else {
2489+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2490+ return ~0UL;
2491+ }
2492
2493 /* Find this entry, or if that fails, the next avail. entry */
2494 while (entry->jump[0]) {
2495diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2496index 8f53954..a704ad6 100644
2497--- a/arch/powerpc/kernel/process.c
2498+++ b/arch/powerpc/kernel/process.c
2499@@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2500 * Lookup NIP late so we have the best change of getting the
2501 * above info out without failing
2502 */
2503- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2504- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2505+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2506+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2507 #endif
2508 show_stack(current, (unsigned long *) regs->gpr[1]);
2509 if (!user_mode(regs))
2510@@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2511 newsp = stack[0];
2512 ip = stack[STACK_FRAME_LR_SAVE];
2513 if (!firstframe || ip != lr) {
2514- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2515+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2516 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2517 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2518- printk(" (%pS)",
2519+ printk(" (%pA)",
2520 (void *)current->ret_stack[curr_frame].ret);
2521 curr_frame--;
2522 }
2523@@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2524 struct pt_regs *regs = (struct pt_regs *)
2525 (sp + STACK_FRAME_OVERHEAD);
2526 lr = regs->link;
2527- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2528+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2529 regs->trap, (void *)regs->nip, (void *)lr);
2530 firstframe = 1;
2531 }
2532@@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2533 }
2534
2535 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2536-
2537-unsigned long arch_align_stack(unsigned long sp)
2538-{
2539- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2540- sp -= get_random_int() & ~PAGE_MASK;
2541- return sp & ~0xf;
2542-}
2543-
2544-static inline unsigned long brk_rnd(void)
2545-{
2546- unsigned long rnd = 0;
2547-
2548- /* 8MB for 32bit, 1GB for 64bit */
2549- if (is_32bit_task())
2550- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2551- else
2552- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2553-
2554- return rnd << PAGE_SHIFT;
2555-}
2556-
2557-unsigned long arch_randomize_brk(struct mm_struct *mm)
2558-{
2559- unsigned long base = mm->brk;
2560- unsigned long ret;
2561-
2562-#ifdef CONFIG_PPC_STD_MMU_64
2563- /*
2564- * If we are using 1TB segments and we are allowed to randomise
2565- * the heap, we can put it above 1TB so it is backed by a 1TB
2566- * segment. Otherwise the heap will be in the bottom 1TB
2567- * which always uses 256MB segments and this may result in a
2568- * performance penalty.
2569- */
2570- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2571- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2572-#endif
2573-
2574- ret = PAGE_ALIGN(base + brk_rnd());
2575-
2576- if (ret < mm->brk)
2577- return mm->brk;
2578-
2579- return ret;
2580-}
2581-
2582-unsigned long randomize_et_dyn(unsigned long base)
2583-{
2584- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2585-
2586- if (ret < base)
2587- return base;
2588-
2589- return ret;
2590-}
2591diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2592index 78b76dc..7f232ef 100644
2593--- a/arch/powerpc/kernel/signal_32.c
2594+++ b/arch/powerpc/kernel/signal_32.c
2595@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
58c5fc13
MT
2596 /* Save user registers on the stack */
2597 frame = &rt_sf->uc.uc_mcontext;
2598 addr = frame;
2599- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2600+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2601 if (save_user_regs(regs, frame, 0, 1))
2602 goto badframe;
2603 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2604diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2605index e91c736..742ec06 100644
2606--- a/arch/powerpc/kernel/signal_64.c
2607+++ b/arch/powerpc/kernel/signal_64.c
2608@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2609 current->thread.fpscr.val = 0;
2610
2611 /* Set up to return from userspace. */
2612- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2613+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2614 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2615 } else {
2616 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2617diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2618index f19d977..8ac286e 100644
2619--- a/arch/powerpc/kernel/traps.c
2620+++ b/arch/powerpc/kernel/traps.c
2621@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2622 static inline void pmac_backlight_unblank(void) { }
2623 #endif
2624
2625+extern void gr_handle_kernel_exploit(void);
2626+
2627 int die(const char *str, struct pt_regs *regs, long err)
2628 {
2629 static struct {
2630@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2631 if (panic_on_oops)
2632 panic("Fatal exception");
2633
2634+ gr_handle_kernel_exploit();
2635+
2636 oops_exit();
2637 do_exit(err);
2638
2639diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2640index 142ab10..236e61a 100644
2641--- a/arch/powerpc/kernel/vdso.c
2642+++ b/arch/powerpc/kernel/vdso.c
2643@@ -36,6 +36,7 @@
2644 #include <asm/firmware.h>
2645 #include <asm/vdso.h>
2646 #include <asm/vdso_datapage.h>
2647+#include <asm/mman.h>
2648
2649 #include "setup.h"
2650
2651@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2652 vdso_base = VDSO32_MBASE;
2653 #endif
2654
2655- current->mm->context.vdso_base = 0;
2656+ current->mm->context.vdso_base = ~0UL;
2657
2658 /* vDSO has a problem and was disabled, just don't "enable" it for the
2659 * process
2660@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2661 vdso_base = get_unmapped_area(NULL, vdso_base,
2662 (vdso_pages << PAGE_SHIFT) +
2663 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2664- 0, 0);
2665+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2666 if (IS_ERR_VALUE(vdso_base)) {
2667 rc = vdso_base;
2668 goto fail_mmapsem;
2669diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2670index 5eea6f3..5d10396 100644
2671--- a/arch/powerpc/lib/usercopy_64.c
2672+++ b/arch/powerpc/lib/usercopy_64.c
2673@@ -9,22 +9,6 @@
2674 #include <linux/module.h>
2675 #include <asm/uaccess.h>
2676
2677-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2678-{
2679- if (likely(access_ok(VERIFY_READ, from, n)))
2680- n = __copy_from_user(to, from, n);
2681- else
2682- memset(to, 0, n);
2683- return n;
2684-}
2685-
2686-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2687-{
2688- if (likely(access_ok(VERIFY_WRITE, to, n)))
2689- n = __copy_to_user(to, from, n);
2690- return n;
2691-}
2692-
2693 unsigned long copy_in_user(void __user *to, const void __user *from,
2694 unsigned long n)
2695 {
2696@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2697 return n;
2698 }
2699
2700-EXPORT_SYMBOL(copy_from_user);
2701-EXPORT_SYMBOL(copy_to_user);
2702 EXPORT_SYMBOL(copy_in_user);
2703
2704diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2705index 5efe8c9..db9ceef 100644
2706--- a/arch/powerpc/mm/fault.c
2707+++ b/arch/powerpc/mm/fault.c
2708@@ -32,6 +32,10 @@
2709 #include <linux/perf_event.h>
2710 #include <linux/magic.h>
2711 #include <linux/ratelimit.h>
2712+#include <linux/slab.h>
2713+#include <linux/pagemap.h>
2714+#include <linux/compiler.h>
2715+#include <linux/unistd.h>
2716
2717 #include <asm/firmware.h>
2718 #include <asm/page.h>
2719@@ -43,6 +47,7 @@
2720 #include <asm/tlbflush.h>
2721 #include <asm/siginfo.h>
2722 #include <mm/mmu_decl.h>
2723+#include <asm/ptrace.h>
2724
2725 #ifdef CONFIG_KPROBES
2726 static inline int notify_page_fault(struct pt_regs *regs)
2727@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2728 }
2729 #endif
2730
2731+#ifdef CONFIG_PAX_PAGEEXEC
2732+/*
2733+ * PaX: decide what to do with offenders (regs->nip = fault address)
2734+ *
2735+ * returns 1 when task should be killed
2736+ */
2737+static int pax_handle_fetch_fault(struct pt_regs *regs)
2738+{
2739+ return 1;
2740+}
2741+
6e9df6a3 2742+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
58c5fc13
MT
2743+{
2744+ unsigned long i;
2745+
2746+ printk(KERN_ERR "PAX: bytes at PC: ");
2747+ for (i = 0; i < 5; i++) {
2748+ unsigned int c;
2749+ if (get_user(c, (unsigned int __user *)pc+i))
2750+ printk(KERN_CONT "???????? ");
2751+ else
2752+ printk(KERN_CONT "%08x ", c);
2753+ }
2754+ printk("\n");
2755+}
2756+#endif
2757+
2758 /*
2759 * Check whether the instruction at regs->nip is a store using
2760 * an update addressing form which will update r1.
2761@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2762 * indicate errors in DSISR but can validly be set in SRR1.
2763 */
2764 if (trap == 0x400)
2765- error_code &= 0x48200000;
2766+ error_code &= 0x58200000;
2767 else
2768 is_write = error_code & DSISR_ISSTORE;
2769 #else
2770@@ -259,7 +291,7 @@ good_area:
2771 * "undefined". Of those that can be set, this is the only
2772 * one which seems bad.
2773 */
2774- if (error_code & 0x10000000)
2775+ if (error_code & DSISR_GUARDED)
2776 /* Guarded storage error. */
2777 goto bad_area;
2778 #endif /* CONFIG_8xx */
2779@@ -274,7 +306,7 @@ good_area:
2780 * processors use the same I/D cache coherency mechanism
2781 * as embedded.
2782 */
2783- if (error_code & DSISR_PROTFAULT)
2784+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2785 goto bad_area;
2786 #endif /* CONFIG_PPC_STD_MMU */
2787
2788@@ -343,6 +375,23 @@ bad_area:
2789 bad_area_nosemaphore:
2790 /* User mode accesses cause a SIGSEGV */
2791 if (user_mode(regs)) {
2792+
2793+#ifdef CONFIG_PAX_PAGEEXEC
2794+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2795+#ifdef CONFIG_PPC_STD_MMU
2796+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2797+#else
2798+ if (is_exec && regs->nip == address) {
2799+#endif
2800+ switch (pax_handle_fetch_fault(regs)) {
2801+ }
2802+
2803+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2804+ do_group_exit(SIGKILL);
2805+ }
2806+ }
2807+#endif
2808+
2809 _exception(SIGSEGV, regs, code, address);
2810 return 0;
2811 }
2812diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2813index 5a783d8..c23e14b 100644
2814--- a/arch/powerpc/mm/mmap_64.c
2815+++ b/arch/powerpc/mm/mmap_64.c
2816@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2817 */
2818 if (mmap_is_legacy()) {
2819 mm->mmap_base = TASK_UNMAPPED_BASE;
2820+
2821+#ifdef CONFIG_PAX_RANDMMAP
2822+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2823+ mm->mmap_base += mm->delta_mmap;
2824+#endif
2825+
2826 mm->get_unmapped_area = arch_get_unmapped_area;
2827 mm->unmap_area = arch_unmap_area;
2828 } else {
2829 mm->mmap_base = mmap_base();
2830+
2831+#ifdef CONFIG_PAX_RANDMMAP
2832+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2833+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2834+#endif
2835+
2836 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2837 mm->unmap_area = arch_unmap_area_topdown;
2838 }
2839diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2840index ba51948..23009d9 100644
2841--- a/arch/powerpc/mm/slice.c
2842+++ b/arch/powerpc/mm/slice.c
2843@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2844 if ((mm->task_size - len) < addr)
2845 return 0;
2846 vma = find_vma(mm, addr);
2847- return (!vma || (addr + len) <= vma->vm_start);
2848+ return check_heap_stack_gap(vma, addr, len);
2849 }
2850
2851 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2852@@ -256,7 +256,7 @@ full_search:
2853 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2854 continue;
2855 }
2856- if (!vma || addr + len <= vma->vm_start) {
2857+ if (check_heap_stack_gap(vma, addr, len)) {
2858 /*
2859 * Remember the place where we stopped the search:
2860 */
2861@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2862 }
2863 }
2864
2865- addr = mm->mmap_base;
2866- while (addr > len) {
2867+ if (mm->mmap_base < len)
2868+ addr = -ENOMEM;
2869+ else
2870+ addr = mm->mmap_base - len;
2871+
2872+ while (!IS_ERR_VALUE(addr)) {
2873 /* Go down by chunk size */
2874- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2875+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2876
2877 /* Check for hit with different page size */
2878 mask = slice_range_to_mask(addr, len);
2879@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2880 * return with success:
2881 */
2882 vma = find_vma(mm, addr);
2883- if (!vma || (addr + len) <= vma->vm_start) {
2884+ if (check_heap_stack_gap(vma, addr, len)) {
2885 /* remember the address as a hint for next time */
2886 if (use_cache)
2887 mm->free_area_cache = addr;
2888@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2889 mm->cached_hole_size = vma->vm_start - addr;
2890
2891 /* try just below the current vma->vm_start */
2892- addr = vma->vm_start;
2893+ addr = skip_heap_stack_gap(vma, len);
2894 }
2895
2896 /*
2897@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2898 if (fixed && addr > (mm->task_size - len))
2899 return -EINVAL;
2900
2901+#ifdef CONFIG_PAX_RANDMMAP
2902+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2903+ addr = 0;
2904+#endif
2905+
2906 /* If hint, make sure it matches our alignment restrictions */
2907 if (!fixed && addr) {
2908 addr = _ALIGN_UP(addr, 1ul << pshift);
2909diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2910index 547f1a6..3fff354 100644
2911--- a/arch/s390/include/asm/elf.h
2912+++ b/arch/s390/include/asm/elf.h
2913@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2914 the loader. We need to make sure that it is out of the way of the program
2915 that it will "exec", and that there is sufficient room for the brk. */
2916
2917-extern unsigned long randomize_et_dyn(unsigned long base);
2918-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2919+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2920+
2921+#ifdef CONFIG_PAX_ASLR
2922+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2923+
2924+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2925+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2926+#endif
2927
2928 /* This yields a mask that user programs can use to figure out what
2929 instruction set this CPU supports. */
2930@@ -211,7 +217,4 @@ struct linux_binprm;
2931 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2932 int arch_setup_additional_pages(struct linux_binprm *, int);
2933
2934-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2935-#define arch_randomize_brk arch_randomize_brk
2936-
2937 #endif
2938diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2939index 6582f69..b69906f 100644
2940--- a/arch/s390/include/asm/system.h
2941+++ b/arch/s390/include/asm/system.h
2942@@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *command);
16454cff
MT
2943 extern void (*_machine_halt)(void);
2944 extern void (*_machine_power_off)(void);
2945
2946-extern unsigned long arch_align_stack(unsigned long sp);
2947+#define arch_align_stack(x) ((x) & ~0xfUL)
2948
2949 static inline int tprot(unsigned long addr)
2950 {
2951diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2952index 2b23885..e136e31 100644
2953--- a/arch/s390/include/asm/uaccess.h
2954+++ b/arch/s390/include/asm/uaccess.h
2955@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2956 copy_to_user(void __user *to, const void *from, unsigned long n)
2957 {
2958 might_fault();
2959+
2960+ if ((long)n < 0)
2961+ return n;
2962+
2963 if (access_ok(VERIFY_WRITE, to, n))
2964 n = __copy_to_user(to, from, n);
2965 return n;
2966@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2967 static inline unsigned long __must_check
2968 __copy_from_user(void *to, const void __user *from, unsigned long n)
2969 {
2970+ if ((long)n < 0)
2971+ return n;
2972+
2973 if (__builtin_constant_p(n) && (n <= 256))
2974 return uaccess.copy_from_user_small(n, from, to);
2975 else
2976@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2977 unsigned int sz = __compiletime_object_size(to);
2978
2979 might_fault();
2980+
2981+ if ((long)n < 0)
2982+ return n;
2983+
2984 if (unlikely(sz != -1 && sz < n)) {
2985 copy_from_user_overflow();
2986 return n;
2987diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2988index dfcb343..eda788a 100644
2989--- a/arch/s390/kernel/module.c
2990+++ b/arch/s390/kernel/module.c
2991@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2992
2993 /* Increase core size by size of got & plt and set start
2994 offsets for got and plt. */
2995- me->core_size = ALIGN(me->core_size, 4);
2996- me->arch.got_offset = me->core_size;
2997- me->core_size += me->arch.got_size;
2998- me->arch.plt_offset = me->core_size;
2999- me->core_size += me->arch.plt_size;
3000+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3001+ me->arch.got_offset = me->core_size_rw;
3002+ me->core_size_rw += me->arch.got_size;
3003+ me->arch.plt_offset = me->core_size_rx;
3004+ me->core_size_rx += me->arch.plt_size;
3005 return 0;
3006 }
3007
3008@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3009 if (info->got_initialized == 0) {
3010 Elf_Addr *gotent;
3011
3012- gotent = me->module_core + me->arch.got_offset +
3013+ gotent = me->module_core_rw + me->arch.got_offset +
3014 info->got_offset;
3015 *gotent = val;
3016 info->got_initialized = 1;
3017@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3018 else if (r_type == R_390_GOTENT ||
3019 r_type == R_390_GOTPLTENT)
3020 *(unsigned int *) loc =
3021- (val + (Elf_Addr) me->module_core - loc) >> 1;
3022+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3023 else if (r_type == R_390_GOT64 ||
3024 r_type == R_390_GOTPLT64)
3025 *(unsigned long *) loc = val;
3026@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3027 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3028 if (info->plt_initialized == 0) {
3029 unsigned int *ip;
3030- ip = me->module_core + me->arch.plt_offset +
3031+ ip = me->module_core_rx + me->arch.plt_offset +
3032 info->plt_offset;
3033 #ifndef CONFIG_64BIT
3034 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3035@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3036 val - loc + 0xffffUL < 0x1ffffeUL) ||
3037 (r_type == R_390_PLT32DBL &&
3038 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3039- val = (Elf_Addr) me->module_core +
3040+ val = (Elf_Addr) me->module_core_rx +
3041 me->arch.plt_offset +
3042 info->plt_offset;
3043 val += rela->r_addend - loc;
3044@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3045 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3046 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3047 val = val + rela->r_addend -
3048- ((Elf_Addr) me->module_core + me->arch.got_offset);
3049+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3050 if (r_type == R_390_GOTOFF16)
3051 *(unsigned short *) loc = val;
3052 else if (r_type == R_390_GOTOFF32)
3053@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3054 break;
3055 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3056 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3057- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3058+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3059 rela->r_addend - loc;
3060 if (r_type == R_390_GOTPC)
3061 *(unsigned int *) loc = val;
3062diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3063index 541a750..8739853 100644
3064--- a/arch/s390/kernel/process.c
3065+++ b/arch/s390/kernel/process.c
3066@@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_struct *p)
3067 }
3068 return 0;
3069 }
3070-
3071-unsigned long arch_align_stack(unsigned long sp)
3072-{
3073- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3074- sp -= get_random_int() & ~PAGE_MASK;
3075- return sp & ~0xf;
3076-}
3077-
3078-static inline unsigned long brk_rnd(void)
3079-{
3080- /* 8MB for 32bit, 1GB for 64bit */
3081- if (is_32bit_task())
3082- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3083- else
3084- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3085-}
3086-
3087-unsigned long arch_randomize_brk(struct mm_struct *mm)
3088-{
3089- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3090-
3091- if (ret < mm->brk)
3092- return mm->brk;
3093- return ret;
3094-}
3095-
3096-unsigned long randomize_et_dyn(unsigned long base)
3097-{
3098- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3099-
3100- if (!(current->flags & PF_RANDOMIZE))
3101- return base;
3102- if (ret < base)
3103- return base;
3104- return ret;
3105-}
3106diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3107index 7b371c3..ad06cf1 100644
3108--- a/arch/s390/kernel/setup.c
3109+++ b/arch/s390/kernel/setup.c
3110@@ -271,7 +271,7 @@ static int __init early_parse_mem(char *p)
3111 }
3112 early_param("mem", early_parse_mem);
3113
3114-unsigned int user_mode = HOME_SPACE_MODE;
3115+unsigned int user_mode = SECONDARY_SPACE_MODE;
3116 EXPORT_SYMBOL_GPL(user_mode);
3117
3118 static int set_amode_and_uaccess(unsigned long user_amode,
3119diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3120index c9a9f7f..60d0315 100644
3121--- a/arch/s390/mm/mmap.c
3122+++ b/arch/s390/mm/mmap.c
3123@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3124 */
3125 if (mmap_is_legacy()) {
3126 mm->mmap_base = TASK_UNMAPPED_BASE;
3127+
3128+#ifdef CONFIG_PAX_RANDMMAP
3129+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3130+ mm->mmap_base += mm->delta_mmap;
3131+#endif
3132+
3133 mm->get_unmapped_area = arch_get_unmapped_area;
3134 mm->unmap_area = arch_unmap_area;
3135 } else {
3136 mm->mmap_base = mmap_base();
3137+
3138+#ifdef CONFIG_PAX_RANDMMAP
3139+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3140+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3141+#endif
3142+
3143 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3144 mm->unmap_area = arch_unmap_area_topdown;
3145 }
3146@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3147 */
3148 if (mmap_is_legacy()) {
3149 mm->mmap_base = TASK_UNMAPPED_BASE;
3150+
3151+#ifdef CONFIG_PAX_RANDMMAP
3152+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3153+ mm->mmap_base += mm->delta_mmap;
3154+#endif
3155+
3156 mm->get_unmapped_area = s390_get_unmapped_area;
3157 mm->unmap_area = arch_unmap_area;
3158 } else {
3159 mm->mmap_base = mmap_base();
3160+
3161+#ifdef CONFIG_PAX_RANDMMAP
3162+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3163+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3164+#endif
3165+
3166 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3167 mm->unmap_area = arch_unmap_area_topdown;
3168 }
3169diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3170index 589d5c7..669e274 100644
3171--- a/arch/score/include/asm/system.h
3172+++ b/arch/score/include/asm/system.h
3173@@ -17,7 +17,7 @@ do { \
3174 #define finish_arch_switch(prev) do {} while (0)
3175
3176 typedef void (*vi_handler_t)(void);
3177-extern unsigned long arch_align_stack(unsigned long sp);
3178+#define arch_align_stack(x) (x)
3179
3180 #define mb() barrier()
3181 #define rmb() barrier()
3182diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3183index 25d0803..d6c8e36 100644
3184--- a/arch/score/kernel/process.c
3185+++ b/arch/score/kernel/process.c
3186@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3187
3188 return task_pt_regs(task)->cp0_epc;
3189 }
3190-
3191-unsigned long arch_align_stack(unsigned long sp)
3192-{
3193- return sp;
3194-}
3195diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3196index afeb710..d1d1289 100644
3197--- a/arch/sh/mm/mmap.c
3198+++ b/arch/sh/mm/mmap.c
3199@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3200 addr = PAGE_ALIGN(addr);
3201
3202 vma = find_vma(mm, addr);
3203- if (TASK_SIZE - len >= addr &&
3204- (!vma || addr + len <= vma->vm_start))
3205+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3206 return addr;
3207 }
3208
3209@@ -106,7 +105,7 @@ full_search:
3210 }
3211 return -ENOMEM;
3212 }
3213- if (likely(!vma || addr + len <= vma->vm_start)) {
3214+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3215 /*
3216 * Remember the place where we stopped the search:
3217 */
3218@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 addr = PAGE_ALIGN(addr);
3220
3221 vma = find_vma(mm, addr);
3222- if (TASK_SIZE - len >= addr &&
3223- (!vma || addr + len <= vma->vm_start))
3224+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3225 return addr;
3226 }
3227
3228@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3229 /* make sure it can fit in the remaining address space */
3230 if (likely(addr > len)) {
3231 vma = find_vma(mm, addr-len);
3232- if (!vma || addr <= vma->vm_start) {
3233+ if (check_heap_stack_gap(vma, addr - len, len)) {
3234 /* remember the address as a hint for next time */
3235 return (mm->free_area_cache = addr-len);
3236 }
3237@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3238 if (unlikely(mm->mmap_base < len))
3239 goto bottomup;
3240
3241- addr = mm->mmap_base-len;
3242- if (do_colour_align)
3243- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3244+ addr = mm->mmap_base - len;
3245
3246 do {
3247+ if (do_colour_align)
3248+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3249 /*
3250 * Lookup failure means no vma is above this address,
3251 * else if new region fits below vma->vm_start,
3252 * return with success:
3253 */
3254 vma = find_vma(mm, addr);
3255- if (likely(!vma || addr+len <= vma->vm_start)) {
3256+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3257 /* remember the address as a hint for next time */
3258 return (mm->free_area_cache = addr);
3259 }
3260@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3261 mm->cached_hole_size = vma->vm_start - addr;
3262
3263 /* try just below the current vma->vm_start */
3264- addr = vma->vm_start-len;
3265- if (do_colour_align)
3266- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3267- } while (likely(len < vma->vm_start));
3268+ addr = skip_heap_stack_gap(vma, len);
3269+ } while (!IS_ERR_VALUE(addr));
3270
3271 bottomup:
3272 /*
3273diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3274index ad1fb5d..fc5315b 100644
3275--- a/arch/sparc/Makefile
3276+++ b/arch/sparc/Makefile
3277@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3278 # Export what is needed by arch/sparc/boot/Makefile
3279 export VMLINUX_INIT VMLINUX_MAIN
3280 VMLINUX_INIT := $(head-y) $(init-y)
3281-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3282+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3283 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3284 VMLINUX_MAIN += $(drivers-y) $(net-y)
3285
3286diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3287index 9f421df..b81fc12 100644
3288--- a/arch/sparc/include/asm/atomic_64.h
3289+++ b/arch/sparc/include/asm/atomic_64.h
3290@@ -14,18 +14,40 @@
3291 #define ATOMIC64_INIT(i) { (i) }
3292
3293 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3294+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3295+{
3296+ return v->counter;
3297+}
3298 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3299+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3300+{
3301+ return v->counter;
3302+}
3303
3304 #define atomic_set(v, i) (((v)->counter) = i)
3305+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3306+{
3307+ v->counter = i;
3308+}
3309 #define atomic64_set(v, i) (((v)->counter) = i)
3310+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3311+{
3312+ v->counter = i;
3313+}
3314
3315 extern void atomic_add(int, atomic_t *);
3316+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3317 extern void atomic64_add(long, atomic64_t *);
3318+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3319 extern void atomic_sub(int, atomic_t *);
3320+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3321 extern void atomic64_sub(long, atomic64_t *);
3322+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3323
3324 extern int atomic_add_ret(int, atomic_t *);
3325+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3326 extern long atomic64_add_ret(long, atomic64_t *);
3327+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3328 extern int atomic_sub_ret(int, atomic_t *);
3329 extern long atomic64_sub_ret(long, atomic64_t *);
3330
3331@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3332 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3333
3334 #define atomic_inc_return(v) atomic_add_ret(1, v)
3335+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3336+{
3337+ return atomic_add_ret_unchecked(1, v);
3338+}
3339 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3340+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3341+{
3342+ return atomic64_add_ret_unchecked(1, v);
3343+}
3344
3345 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3346 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3347
3348 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3349+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3350+{
3351+ return atomic_add_ret_unchecked(i, v);
3352+}
3353 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3354+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3355+{
3356+ return atomic64_add_ret_unchecked(i, v);
3357+}
3358
3359 /*
3360 * atomic_inc_and_test - increment and test
3361@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3362 * other cases.
3363 */
3364 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3365+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3366+{
3367+ return atomic_inc_return_unchecked(v) == 0;
3368+}
3369 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3370
3371 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3372@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3373 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3374
3375 #define atomic_inc(v) atomic_add(1, v)
3376+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3377+{
3378+ atomic_add_unchecked(1, v);
3379+}
3380 #define atomic64_inc(v) atomic64_add(1, v)
3381+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3382+{
3383+ atomic64_add_unchecked(1, v);
3384+}
3385
3386 #define atomic_dec(v) atomic_sub(1, v)
3387+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3388+{
3389+ atomic_sub_unchecked(1, v);
3390+}
3391 #define atomic64_dec(v) atomic64_sub(1, v)
3392+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3393+{
3394+ atomic64_sub_unchecked(1, v);
3395+}
3396
3397 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3398 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3399
3400 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3401+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3402+{
3403+ return cmpxchg(&v->counter, old, new);
3404+}
3405 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3406+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3407+{
3408+ return xchg(&v->counter, new);
3409+}
3410
3411 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3412 {
3413- int c, old;
3414+ int c, old, new;
3415 c = atomic_read(v);
3416 for (;;) {
3417- if (unlikely(c == (u)))
3418+ if (unlikely(c == u))
3419 break;
3420- old = atomic_cmpxchg((v), c, c + (a));
3421+
3422+ asm volatile("addcc %2, %0, %0\n"
3423+
3424+#ifdef CONFIG_PAX_REFCOUNT
3425+ "tvs %%icc, 6\n"
3426+#endif
3427+
3428+ : "=r" (new)
3429+ : "0" (c), "ir" (a)
3430+ : "cc");
3431+
3432+ old = atomic_cmpxchg(v, c, new);
3433 if (likely(old == c))
3434 break;
3435 c = old;
3436@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3437 #define atomic64_cmpxchg(v, o, n) \
3438 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3439 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3440+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3441+{
3442+ return xchg(&v->counter, new);
3443+}
3444
3445 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3446 {
3447- long c, old;
3448+ long c, old, new;
3449 c = atomic64_read(v);
3450 for (;;) {
3451- if (unlikely(c == (u)))
3452+ if (unlikely(c == u))
3453 break;
3454- old = atomic64_cmpxchg((v), c, c + (a));
3455+
3456+ asm volatile("addcc %2, %0, %0\n"
3457+
3458+#ifdef CONFIG_PAX_REFCOUNT
3459+ "tvs %%xcc, 6\n"
3460+#endif
3461+
3462+ : "=r" (new)
3463+ : "0" (c), "ir" (a)
3464+ : "cc");
3465+
3466+ old = atomic64_cmpxchg(v, c, new);
3467 if (likely(old == c))
3468 break;
3469 c = old;
3470 }
3471- return c != (u);
3472+ return c != u;
3473 }
3474
3475 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3476diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3477index 69358b5..17b4745 100644
3478--- a/arch/sparc/include/asm/cache.h
3479+++ b/arch/sparc/include/asm/cache.h
3480@@ -10,7 +10,7 @@
3481 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3482
3483 #define L1_CACHE_SHIFT 5
3484-#define L1_CACHE_BYTES 32
3485+#define L1_CACHE_BYTES 32UL
3486
3487 #ifdef CONFIG_SPARC32
3488 #define SMP_CACHE_BYTES_SHIFT 5
3489diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3490index 4269ca6..e3da77f 100644
3491--- a/arch/sparc/include/asm/elf_32.h
3492+++ b/arch/sparc/include/asm/elf_32.h
3493@@ -114,6 +114,13 @@ typedef struct {
3494
3495 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3496
3497+#ifdef CONFIG_PAX_ASLR
3498+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3499+
3500+#define PAX_DELTA_MMAP_LEN 16
3501+#define PAX_DELTA_STACK_LEN 16
3502+#endif
3503+
3504 /* This yields a mask that user programs can use to figure out what
3505 instruction set this cpu supports. This can NOT be done in userspace
3506 on Sparc. */
3507diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3508index 7df8b7f..4946269 100644
3509--- a/arch/sparc/include/asm/elf_64.h
3510+++ b/arch/sparc/include/asm/elf_64.h
3511@@ -180,6 +180,13 @@ typedef struct {
3512 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3513 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3514
3515+#ifdef CONFIG_PAX_ASLR
3516+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3517+
3518+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3519+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3520+#endif
3521+
3522 extern unsigned long sparc64_elf_hwcap;
3523 #define ELF_HWCAP sparc64_elf_hwcap
3524
3525diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3526index 5b31a8e..1d92567 100644
3527--- a/arch/sparc/include/asm/pgtable_32.h
3528+++ b/arch/sparc/include/asm/pgtable_32.h
3529@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3530 BTFIXUPDEF_INT(page_none)
3531 BTFIXUPDEF_INT(page_copy)
3532 BTFIXUPDEF_INT(page_readonly)
3533+
3534+#ifdef CONFIG_PAX_PAGEEXEC
3535+BTFIXUPDEF_INT(page_shared_noexec)
3536+BTFIXUPDEF_INT(page_copy_noexec)
3537+BTFIXUPDEF_INT(page_readonly_noexec)
3538+#endif
3539+
3540 BTFIXUPDEF_INT(page_kernel)
3541
3542 #define PMD_SHIFT SUN4C_PMD_SHIFT
3543@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3544 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3545 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3546
3547+#ifdef CONFIG_PAX_PAGEEXEC
3548+extern pgprot_t PAGE_SHARED_NOEXEC;
3549+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3550+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3551+#else
3552+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3553+# define PAGE_COPY_NOEXEC PAGE_COPY
3554+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3555+#endif
3556+
3557 extern unsigned long page_kernel;
3558
3559 #ifdef MODULE
3560diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3561index f6ae2b2..b03ffc7 100644
3562--- a/arch/sparc/include/asm/pgtsrmmu.h
3563+++ b/arch/sparc/include/asm/pgtsrmmu.h
3564@@ -115,6 +115,13 @@
3565 SRMMU_EXEC | SRMMU_REF)
3566 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3567 SRMMU_EXEC | SRMMU_REF)
3568+
3569+#ifdef CONFIG_PAX_PAGEEXEC
3570+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3571+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3572+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3573+#endif
3574+
3575 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3576 SRMMU_DIRTY | SRMMU_REF)
3577
3578diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3579index 9689176..63c18ea 100644
3580--- a/arch/sparc/include/asm/spinlock_64.h
3581+++ b/arch/sparc/include/asm/spinlock_64.h
3582@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3583
3584 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3585
3586-static void inline arch_read_lock(arch_rwlock_t *lock)
3587+static inline void arch_read_lock(arch_rwlock_t *lock)
3588 {
3589 unsigned long tmp1, tmp2;
3590
3591 __asm__ __volatile__ (
3592 "1: ldsw [%2], %0\n"
3593 " brlz,pn %0, 2f\n"
3594-"4: add %0, 1, %1\n"
3595+"4: addcc %0, 1, %1\n"
3596+
3597+#ifdef CONFIG_PAX_REFCOUNT
3598+" tvs %%icc, 6\n"
3599+#endif
3600+
3601 " cas [%2], %0, %1\n"
3602 " cmp %0, %1\n"
3603 " bne,pn %%icc, 1b\n"
3604@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3605 " .previous"
3606 : "=&r" (tmp1), "=&r" (tmp2)
3607 : "r" (lock)
3608- : "memory");
3609+ : "memory", "cc");
3610 }
3611
3612-static int inline arch_read_trylock(arch_rwlock_t *lock)
3613+static inline int arch_read_trylock(arch_rwlock_t *lock)
3614 {
3615 int tmp1, tmp2;
3616
3617@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3618 "1: ldsw [%2], %0\n"
3619 " brlz,a,pn %0, 2f\n"
3620 " mov 0, %0\n"
3621-" add %0, 1, %1\n"
3622+" addcc %0, 1, %1\n"
3623+
3624+#ifdef CONFIG_PAX_REFCOUNT
3625+" tvs %%icc, 6\n"
3626+#endif
3627+
3628 " cas [%2], %0, %1\n"
3629 " cmp %0, %1\n"
3630 " bne,pn %%icc, 1b\n"
3631@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3632 return tmp1;
3633 }
3634
3635-static void inline arch_read_unlock(arch_rwlock_t *lock)
3636+static inline void arch_read_unlock(arch_rwlock_t *lock)
3637 {
3638 unsigned long tmp1, tmp2;
3639
3640 __asm__ __volatile__(
3641 "1: lduw [%2], %0\n"
3642-" sub %0, 1, %1\n"
3643+" subcc %0, 1, %1\n"
3644+
3645+#ifdef CONFIG_PAX_REFCOUNT
3646+" tvs %%icc, 6\n"
3647+#endif
3648+
3649 " cas [%2], %0, %1\n"
3650 " cmp %0, %1\n"
3651 " bne,pn %%xcc, 1b\n"
3652@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3653 : "memory");
3654 }
3655
3656-static void inline arch_write_lock(arch_rwlock_t *lock)
3657+static inline void arch_write_lock(arch_rwlock_t *lock)
3658 {
3659 unsigned long mask, tmp1, tmp2;
3660
3661@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3662 : "memory");
3663 }
3664
3665-static void inline arch_write_unlock(arch_rwlock_t *lock)
3666+static inline void arch_write_unlock(arch_rwlock_t *lock)
3667 {
3668 __asm__ __volatile__(
3669 " stw %%g0, [%0]"
3670@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3671 : "memory");
3672 }
3673
3674-static int inline arch_write_trylock(arch_rwlock_t *lock)
3675+static inline int arch_write_trylock(arch_rwlock_t *lock)
3676 {
3677 unsigned long mask, tmp1, tmp2, result;
3678
3679diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3680index fa57532..e1a4c53 100644
3681--- a/arch/sparc/include/asm/thread_info_32.h
3682+++ b/arch/sparc/include/asm/thread_info_32.h
3683@@ -50,6 +50,8 @@ struct thread_info {
3684 unsigned long w_saved;
3685
3686 struct restart_block restart_block;
3687+
3688+ unsigned long lowest_stack;
3689 };
3690
3691 /*
3692diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3693index 60d86be..952dea1 100644
3694--- a/arch/sparc/include/asm/thread_info_64.h
3695+++ b/arch/sparc/include/asm/thread_info_64.h
3696@@ -63,6 +63,8 @@ struct thread_info {
3697 struct pt_regs *kern_una_regs;
3698 unsigned int kern_una_insn;
3699
3700+ unsigned long lowest_stack;
3701+
3702 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3703 };
3704
3705diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3706index e88fbe5..96b0ce5 100644
3707--- a/arch/sparc/include/asm/uaccess.h
3708+++ b/arch/sparc/include/asm/uaccess.h
3709@@ -1,5 +1,13 @@
3710 #ifndef ___ASM_SPARC_UACCESS_H
3711 #define ___ASM_SPARC_UACCESS_H
3712+
3713+#ifdef __KERNEL__
3714+#ifndef __ASSEMBLY__
3715+#include <linux/types.h>
3716+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3717+#endif
3718+#endif
3719+
3720 #if defined(__sparc__) && defined(__arch64__)
3721 #include <asm/uaccess_64.h>
3722 #else
3723diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3724index 8303ac4..07f333d 100644
3725--- a/arch/sparc/include/asm/uaccess_32.h
3726+++ b/arch/sparc/include/asm/uaccess_32.h
3727@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3728
3729 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3730 {
3731- if (n && __access_ok((unsigned long) to, n))
3732+ if ((long)n < 0)
3733+ return n;
3734+
3735+ if (n && __access_ok((unsigned long) to, n)) {
3736+ if (!__builtin_constant_p(n))
3737+ check_object_size(from, n, true);
3738 return __copy_user(to, (__force void __user *) from, n);
3739- else
3740+ } else
3741 return n;
3742 }
3743
3744 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3745 {
3746+ if ((long)n < 0)
3747+ return n;
3748+
3749+ if (!__builtin_constant_p(n))
3750+ check_object_size(from, n, true);
3751+
3752 return __copy_user(to, (__force void __user *) from, n);
3753 }
3754
3755 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3756 {
3757- if (n && __access_ok((unsigned long) from, n))
3758+ if ((long)n < 0)
3759+ return n;
3760+
3761+ if (n && __access_ok((unsigned long) from, n)) {
3762+ if (!__builtin_constant_p(n))
3763+ check_object_size(to, n, false);
3764 return __copy_user((__force void __user *) to, from, n);
3765- else
3766+ } else
3767 return n;
3768 }
3769
3770 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3771 {
3772+ if ((long)n < 0)
3773+ return n;
3774+
3775 return __copy_user((__force void __user *) to, from, n);
3776 }
3777
3778diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3779index 3e1449f..5293a0e 100644
3780--- a/arch/sparc/include/asm/uaccess_64.h
3781+++ b/arch/sparc/include/asm/uaccess_64.h
3782@@ -10,6 +10,7 @@
3783 #include <linux/compiler.h>
3784 #include <linux/string.h>
3785 #include <linux/thread_info.h>
3786+#include <linux/kernel.h>
3787 #include <asm/asi.h>
3788 #include <asm/system.h>
3789 #include <asm/spitfire.h>
3790@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3791 static inline unsigned long __must_check
3792 copy_from_user(void *to, const void __user *from, unsigned long size)
3793 {
3794- unsigned long ret = ___copy_from_user(to, from, size);
3795+ unsigned long ret;
3796
3797+ if ((long)size < 0 || size > INT_MAX)
3798+ return size;
3799+
3800+ if (!__builtin_constant_p(size))
3801+ check_object_size(to, size, false);
3802+
3803+ ret = ___copy_from_user(to, from, size);
3804 if (unlikely(ret))
3805 ret = copy_from_user_fixup(to, from, size);
3806
3807@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3808 static inline unsigned long __must_check
3809 copy_to_user(void __user *to, const void *from, unsigned long size)
3810 {
3811- unsigned long ret = ___copy_to_user(to, from, size);
3812+ unsigned long ret;
3813
3814+ if ((long)size < 0 || size > INT_MAX)
3815+ return size;
3816+
3817+ if (!__builtin_constant_p(size))
3818+ check_object_size(from, size, true);
3819+
3820+ ret = ___copy_to_user(to, from, size);
3821 if (unlikely(ret))
3822 ret = copy_to_user_fixup(to, from, size);
3823 return ret;
3824diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3825index cb85458..e063f17 100644
3826--- a/arch/sparc/kernel/Makefile
3827+++ b/arch/sparc/kernel/Makefile
3828@@ -3,7 +3,7 @@
3829 #
3830
3831 asflags-y := -ansi
3832-ccflags-y := -Werror
3833+#ccflags-y := -Werror
3834
3835 extra-y := head_$(BITS).o
3836 extra-y += init_task.o
3837diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3838index f793742..4d880af 100644
3839--- a/arch/sparc/kernel/process_32.c
3840+++ b/arch/sparc/kernel/process_32.c
3841@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3842 rw->ins[4], rw->ins[5],
3843 rw->ins[6],
3844 rw->ins[7]);
3845- printk("%pS\n", (void *) rw->ins[7]);
3846+ printk("%pA\n", (void *) rw->ins[7]);
3847 rw = (struct reg_window32 *) rw->ins[6];
3848 }
3849 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3850@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3851
3852 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3853 r->psr, r->pc, r->npc, r->y, print_tainted());
3854- printk("PC: <%pS>\n", (void *) r->pc);
3855+ printk("PC: <%pA>\n", (void *) r->pc);
3856 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3857 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3858 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3859 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3860 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3861 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3862- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3863+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3864
3865 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3866 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3867@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3868 rw = (struct reg_window32 *) fp;
3869 pc = rw->ins[7];
3870 printk("[%08lx : ", pc);
3871- printk("%pS ] ", (void *) pc);
3872+ printk("%pA ] ", (void *) pc);
3873 fp = rw->ins[6];
3874 } while (++count < 16);
3875 printk("\n");
3876diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3877index d959cd0..7b42812 100644
3878--- a/arch/sparc/kernel/process_64.c
3879+++ b/arch/sparc/kernel/process_64.c
3880@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3881 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3882 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3883 if (regs->tstate & TSTATE_PRIV)
3884- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3885+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3886 }
3887
3888 void show_regs(struct pt_regs *regs)
3889 {
3890 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3891 regs->tpc, regs->tnpc, regs->y, print_tainted());
3892- printk("TPC: <%pS>\n", (void *) regs->tpc);
3893+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3894 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3895 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3896 regs->u_regs[3]);
3897@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3898 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3899 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3900 regs->u_regs[15]);
3901- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3902+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3903 show_regwindow(regs);
3904 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3905 }
3906@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3907 ((tp && tp->task) ? tp->task->pid : -1));
3908
3909 if (gp->tstate & TSTATE_PRIV) {
3910- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3911+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3912 (void *) gp->tpc,
3913 (void *) gp->o7,
3914 (void *) gp->i7,
3915diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3916index 42b282f..28ce9f2 100644
3917--- a/arch/sparc/kernel/sys_sparc_32.c
3918+++ b/arch/sparc/kernel/sys_sparc_32.c
3919@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3920 if (ARCH_SUN4C && len > 0x20000000)
3921 return -ENOMEM;
3922 if (!addr)
3923- addr = TASK_UNMAPPED_BASE;
3924+ addr = current->mm->mmap_base;
3925
3926 if (flags & MAP_SHARED)
3927 addr = COLOUR_ALIGN(addr);
3928@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3929 }
3930 if (TASK_SIZE - PAGE_SIZE - len < addr)
3931 return -ENOMEM;
3932- if (!vmm || addr + len <= vmm->vm_start)
3933+ if (check_heap_stack_gap(vmm, addr, len))
3934 return addr;
3935 addr = vmm->vm_end;
3936 if (flags & MAP_SHARED)
3937diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3938index 908b47a..aa9e584 100644
3939--- a/arch/sparc/kernel/sys_sparc_64.c
3940+++ b/arch/sparc/kernel/sys_sparc_64.c
3941@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3942 /* We do not accept a shared mapping if it would violate
3943 * cache aliasing constraints.
3944 */
3945- if ((flags & MAP_SHARED) &&
3946+ if ((filp || (flags & MAP_SHARED)) &&
3947 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3948 return -EINVAL;
3949 return addr;
3950@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3951 if (filp || (flags & MAP_SHARED))
3952 do_color_align = 1;
3953
3954+#ifdef CONFIG_PAX_RANDMMAP
3955+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3956+#endif
3957+
3958 if (addr) {
3959 if (do_color_align)
3960 addr = COLOUR_ALIGN(addr, pgoff);
3961@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3962 addr = PAGE_ALIGN(addr);
3963
3964 vma = find_vma(mm, addr);
3965- if (task_size - len >= addr &&
3966- (!vma || addr + len <= vma->vm_start))
3967+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3968 return addr;
3969 }
3970
3971 if (len > mm->cached_hole_size) {
3972- start_addr = addr = mm->free_area_cache;
3973+ start_addr = addr = mm->free_area_cache;
3974 } else {
3975- start_addr = addr = TASK_UNMAPPED_BASE;
3976+ start_addr = addr = mm->mmap_base;
3977 mm->cached_hole_size = 0;
3978 }
3979
3980@@ -174,14 +177,14 @@ full_search:
3981 vma = find_vma(mm, VA_EXCLUDE_END);
3982 }
3983 if (unlikely(task_size < addr)) {
3984- if (start_addr != TASK_UNMAPPED_BASE) {
3985- start_addr = addr = TASK_UNMAPPED_BASE;
3986+ if (start_addr != mm->mmap_base) {
3987+ start_addr = addr = mm->mmap_base;
3988 mm->cached_hole_size = 0;
3989 goto full_search;
3990 }
3991 return -ENOMEM;
3992 }
3993- if (likely(!vma || addr + len <= vma->vm_start)) {
3994+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3995 /*
3996 * Remember the place where we stopped the search:
3997 */
3998@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3999 /* We do not accept a shared mapping if it would violate
4000 * cache aliasing constraints.
4001 */
4002- if ((flags & MAP_SHARED) &&
4003+ if ((filp || (flags & MAP_SHARED)) &&
4004 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4005 return -EINVAL;
4006 return addr;
4007@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 addr = PAGE_ALIGN(addr);
4009
4010 vma = find_vma(mm, addr);
4011- if (task_size - len >= addr &&
4012- (!vma || addr + len <= vma->vm_start))
4013+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4014 return addr;
4015 }
4016
4017@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4018 /* make sure it can fit in the remaining address space */
4019 if (likely(addr > len)) {
4020 vma = find_vma(mm, addr-len);
4021- if (!vma || addr <= vma->vm_start) {
4022+ if (check_heap_stack_gap(vma, addr - len, len)) {
4023 /* remember the address as a hint for next time */
4024 return (mm->free_area_cache = addr-len);
4025 }
4026@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4027 if (unlikely(mm->mmap_base < len))
4028 goto bottomup;
4029
4030- addr = mm->mmap_base-len;
4031- if (do_color_align)
4032- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4033+ addr = mm->mmap_base - len;
4034
4035 do {
4036+ if (do_color_align)
4037+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4038 /*
4039 * Lookup failure means no vma is above this address,
4040 * else if new region fits below vma->vm_start,
4041 * return with success:
4042 */
4043 vma = find_vma(mm, addr);
4044- if (likely(!vma || addr+len <= vma->vm_start)) {
4045+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4046 /* remember the address as a hint for next time */
4047 return (mm->free_area_cache = addr);
4048 }
4049@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4050 mm->cached_hole_size = vma->vm_start - addr;
4051
4052 /* try just below the current vma->vm_start */
4053- addr = vma->vm_start-len;
4054- if (do_color_align)
4055- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4056- } while (likely(len < vma->vm_start));
4057+ addr = skip_heap_stack_gap(vma, len);
4058+ } while (!IS_ERR_VALUE(addr));
4059
4060 bottomup:
4061 /*
4062@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4063 gap == RLIM_INFINITY ||
4064 sysctl_legacy_va_layout) {
4065 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4066+
4067+#ifdef CONFIG_PAX_RANDMMAP
4068+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4069+ mm->mmap_base += mm->delta_mmap;
4070+#endif
4071+
4072 mm->get_unmapped_area = arch_get_unmapped_area;
4073 mm->unmap_area = arch_unmap_area;
4074 } else {
4075@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4076 gap = (task_size / 6 * 5);
4077
4078 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4079+
4080+#ifdef CONFIG_PAX_RANDMMAP
4081+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4082+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4083+#endif
4084+
4085 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4086 mm->unmap_area = arch_unmap_area_topdown;
4087 }
4088diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4089index c0490c7..84959d1 100644
4090--- a/arch/sparc/kernel/traps_32.c
4091+++ b/arch/sparc/kernel/traps_32.c
4092@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
4093 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4094 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4095
4096+extern void gr_handle_kernel_exploit(void);
4097+
4098 void die_if_kernel(char *str, struct pt_regs *regs)
4099 {
4100 static int die_counter;
4101@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4102 count++ < 30 &&
4103 (((unsigned long) rw) >= PAGE_OFFSET) &&
4104 !(((unsigned long) rw) & 0x7)) {
4105- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4106+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4107 (void *) rw->ins[7]);
4108 rw = (struct reg_window32 *)rw->ins[6];
4109 }
4110 }
4111 printk("Instruction DUMP:");
4112 instruction_dump ((unsigned long *) regs->pc);
4113- if(regs->psr & PSR_PS)
4114+ if(regs->psr & PSR_PS) {
4115+ gr_handle_kernel_exploit();
4116 do_exit(SIGKILL);
4117+ }
4118 do_exit(SIGSEGV);
4119 }
4120
4121diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4122index 0cbdaa4..438e4c9 100644
4123--- a/arch/sparc/kernel/traps_64.c
4124+++ b/arch/sparc/kernel/traps_64.c
4125@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4126 i + 1,
4127 p->trapstack[i].tstate, p->trapstack[i].tpc,
4128 p->trapstack[i].tnpc, p->trapstack[i].tt);
4129- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4130+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4131 }
4132 }
4133
4134@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4135
4136 lvl -= 0x100;
4137 if (regs->tstate & TSTATE_PRIV) {
4138+
4139+#ifdef CONFIG_PAX_REFCOUNT
4140+ if (lvl == 6)
4141+ pax_report_refcount_overflow(regs);
4142+#endif
4143+
4144 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4145 die_if_kernel(buffer, regs);
4146 }
4147@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4148 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4149 {
4150 char buffer[32];
4151-
4152+
4153 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4154 0, lvl, SIGTRAP) == NOTIFY_STOP)
4155 return;
4156
4157+#ifdef CONFIG_PAX_REFCOUNT
4158+ if (lvl == 6)
4159+ pax_report_refcount_overflow(regs);
4160+#endif
4161+
4162 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4163
4164 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4165@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4166 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4167 printk("%s" "ERROR(%d): ",
4168 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4169- printk("TPC<%pS>\n", (void *) regs->tpc);
4170+ printk("TPC<%pA>\n", (void *) regs->tpc);
4171 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4172 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4173 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4174@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4175 smp_processor_id(),
4176 (type & 0x1) ? 'I' : 'D',
4177 regs->tpc);
4178- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4179+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4180 panic("Irrecoverable Cheetah+ parity error.");
4181 }
4182
4183@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4184 smp_processor_id(),
4185 (type & 0x1) ? 'I' : 'D',
4186 regs->tpc);
4187- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4188+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4189 }
4190
4191 struct sun4v_error_entry {
4192@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4193
4194 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4195 regs->tpc, tl);
4196- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4197+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4198 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4199- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4200+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4201 (void *) regs->u_regs[UREG_I7]);
4202 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4203 "pte[%lx] error[%lx]\n",
4204@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4205
4206 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4207 regs->tpc, tl);
4208- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4209+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4210 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4211- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4212+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4213 (void *) regs->u_regs[UREG_I7]);
4214 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4215 "pte[%lx] error[%lx]\n",
4216@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4217 fp = (unsigned long)sf->fp + STACK_BIAS;
4218 }
4219
4220- printk(" [%016lx] %pS\n", pc, (void *) pc);
4221+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4222 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4223 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4224 int index = tsk->curr_ret_stack;
4225 if (tsk->ret_stack && index >= graph) {
4226 pc = tsk->ret_stack[index - graph].ret;
4227- printk(" [%016lx] %pS\n", pc, (void *) pc);
4228+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4229 graph++;
4230 }
4231 }
4232@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4233 return (struct reg_window *) (fp + STACK_BIAS);
4234 }
4235
4236+extern void gr_handle_kernel_exploit(void);
4237+
4238 void die_if_kernel(char *str, struct pt_regs *regs)
4239 {
4240 static int die_counter;
4241@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4242 while (rw &&
4243 count++ < 30 &&
4244 kstack_valid(tp, (unsigned long) rw)) {
4245- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4246+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4247 (void *) rw->ins[7]);
4248
4249 rw = kernel_stack_up(rw);
4250@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4251 }
4252 user_instruction_dump ((unsigned int __user *) regs->tpc);
4253 }
4254- if (regs->tstate & TSTATE_PRIV)
4255+ if (regs->tstate & TSTATE_PRIV) {
4256+ gr_handle_kernel_exploit();
4257 do_exit(SIGKILL);
4258+ }
4259 do_exit(SIGSEGV);
4260 }
4261 EXPORT_SYMBOL(die_if_kernel);
4262diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4263index 76e4ac1..78f8bb1 100644
4264--- a/arch/sparc/kernel/unaligned_64.c
4265+++ b/arch/sparc/kernel/unaligned_64.c
4266@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4267 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4268
4269 if (__ratelimit(&ratelimit)) {
4270- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4271+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4272 regs->tpc, (void *) regs->tpc);
4273 }
4274 }
4275diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4276index a3fc437..fea9957 100644
4277--- a/arch/sparc/lib/Makefile
4278+++ b/arch/sparc/lib/Makefile
4279@@ -2,7 +2,7 @@
4280 #
4281
4282 asflags-y := -ansi -DST_DIV0=0x02
4283-ccflags-y := -Werror
4284+#ccflags-y := -Werror
4285
4286 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4287 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4288diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4289index 59186e0..f747d7a 100644
4290--- a/arch/sparc/lib/atomic_64.S
4291+++ b/arch/sparc/lib/atomic_64.S
4292@@ -18,7 +18,12 @@
4293 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4294 BACKOFF_SETUP(%o2)
4295 1: lduw [%o1], %g1
4296- add %g1, %o0, %g7
4297+ addcc %g1, %o0, %g7
4298+
4299+#ifdef CONFIG_PAX_REFCOUNT
4300+ tvs %icc, 6
4301+#endif
4302+
4303 cas [%o1], %g1, %g7
4304 cmp %g1, %g7
4305 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4306@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4307 2: BACKOFF_SPIN(%o2, %o3, 1b)
4308 .size atomic_add, .-atomic_add
4309
4310+ .globl atomic_add_unchecked
4311+ .type atomic_add_unchecked,#function
4312+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4313+ BACKOFF_SETUP(%o2)
4314+1: lduw [%o1], %g1
4315+ add %g1, %o0, %g7
4316+ cas [%o1], %g1, %g7
4317+ cmp %g1, %g7
4318+ bne,pn %icc, 2f
4319+ nop
4320+ retl
4321+ nop
4322+2: BACKOFF_SPIN(%o2, %o3, 1b)
4323+ .size atomic_add_unchecked, .-atomic_add_unchecked
4324+
4325 .globl atomic_sub
4326 .type atomic_sub,#function
4327 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4328 BACKOFF_SETUP(%o2)
4329 1: lduw [%o1], %g1
4330- sub %g1, %o0, %g7
4331+ subcc %g1, %o0, %g7
4332+
4333+#ifdef CONFIG_PAX_REFCOUNT
4334+ tvs %icc, 6
4335+#endif
4336+
4337 cas [%o1], %g1, %g7
4338 cmp %g1, %g7
4339 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4340@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4341 2: BACKOFF_SPIN(%o2, %o3, 1b)
4342 .size atomic_sub, .-atomic_sub
4343
4344+ .globl atomic_sub_unchecked
4345+ .type atomic_sub_unchecked,#function
4346+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4347+ BACKOFF_SETUP(%o2)
4348+1: lduw [%o1], %g1
4349+ sub %g1, %o0, %g7
4350+ cas [%o1], %g1, %g7
4351+ cmp %g1, %g7
4352+ bne,pn %icc, 2f
4353+ nop
4354+ retl
4355+ nop
4356+2: BACKOFF_SPIN(%o2, %o3, 1b)
4357+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4358+
4359 .globl atomic_add_ret
4360 .type atomic_add_ret,#function
4361 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4362 BACKOFF_SETUP(%o2)
4363 1: lduw [%o1], %g1
4364- add %g1, %o0, %g7
4365+ addcc %g1, %o0, %g7
4366+
4367+#ifdef CONFIG_PAX_REFCOUNT
4368+ tvs %icc, 6
4369+#endif
4370+
4371 cas [%o1], %g1, %g7
4372 cmp %g1, %g7
4373 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4374@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4375 2: BACKOFF_SPIN(%o2, %o3, 1b)
4376 .size atomic_add_ret, .-atomic_add_ret
4377
4378+ .globl atomic_add_ret_unchecked
4379+ .type atomic_add_ret_unchecked,#function
4380+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4381+ BACKOFF_SETUP(%o2)
4382+1: lduw [%o1], %g1
4383+ addcc %g1, %o0, %g7
4384+ cas [%o1], %g1, %g7
4385+ cmp %g1, %g7
4386+ bne,pn %icc, 2f
4387+ add %g7, %o0, %g7
4388+ sra %g7, 0, %o0
4389+ retl
4390+ nop
4391+2: BACKOFF_SPIN(%o2, %o3, 1b)
4392+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4393+
4394 .globl atomic_sub_ret
4395 .type atomic_sub_ret,#function
4396 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4397 BACKOFF_SETUP(%o2)
4398 1: lduw [%o1], %g1
4399- sub %g1, %o0, %g7
4400+ subcc %g1, %o0, %g7
4401+
4402+#ifdef CONFIG_PAX_REFCOUNT
4403+ tvs %icc, 6
4404+#endif
4405+
4406 cas [%o1], %g1, %g7
4407 cmp %g1, %g7
4408 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4409@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4410 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4411 BACKOFF_SETUP(%o2)
4412 1: ldx [%o1], %g1
4413- add %g1, %o0, %g7
4414+ addcc %g1, %o0, %g7
4415+
4416+#ifdef CONFIG_PAX_REFCOUNT
4417+ tvs %xcc, 6
4418+#endif
4419+
4420 casx [%o1], %g1, %g7
4421 cmp %g1, %g7
4422 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4423@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4424 2: BACKOFF_SPIN(%o2, %o3, 1b)
4425 .size atomic64_add, .-atomic64_add
4426
4427+ .globl atomic64_add_unchecked
4428+ .type atomic64_add_unchecked,#function
4429+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4430+ BACKOFF_SETUP(%o2)
4431+1: ldx [%o1], %g1
4432+ addcc %g1, %o0, %g7
4433+ casx [%o1], %g1, %g7
4434+ cmp %g1, %g7
4435+ bne,pn %xcc, 2f
4436+ nop
4437+ retl
4438+ nop
4439+2: BACKOFF_SPIN(%o2, %o3, 1b)
4440+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4441+
4442 .globl atomic64_sub
4443 .type atomic64_sub,#function
4444 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4445 BACKOFF_SETUP(%o2)
4446 1: ldx [%o1], %g1
4447- sub %g1, %o0, %g7
4448+ subcc %g1, %o0, %g7
4449+
4450+#ifdef CONFIG_PAX_REFCOUNT
4451+ tvs %xcc, 6
4452+#endif
4453+
4454 casx [%o1], %g1, %g7
4455 cmp %g1, %g7
4456 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4457@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4458 2: BACKOFF_SPIN(%o2, %o3, 1b)
4459 .size atomic64_sub, .-atomic64_sub
4460
4461+ .globl atomic64_sub_unchecked
4462+ .type atomic64_sub_unchecked,#function
4463+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4464+ BACKOFF_SETUP(%o2)
4465+1: ldx [%o1], %g1
4466+ subcc %g1, %o0, %g7
4467+ casx [%o1], %g1, %g7
4468+ cmp %g1, %g7
4469+ bne,pn %xcc, 2f
4470+ nop
4471+ retl
4472+ nop
4473+2: BACKOFF_SPIN(%o2, %o3, 1b)
4474+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4475+
4476 .globl atomic64_add_ret
4477 .type atomic64_add_ret,#function
4478 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4479 BACKOFF_SETUP(%o2)
4480 1: ldx [%o1], %g1
4481- add %g1, %o0, %g7
4482+ addcc %g1, %o0, %g7
4483+
4484+#ifdef CONFIG_PAX_REFCOUNT
4485+ tvs %xcc, 6
4486+#endif
4487+
4488 casx [%o1], %g1, %g7
4489 cmp %g1, %g7
4490 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4491@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4492 2: BACKOFF_SPIN(%o2, %o3, 1b)
4493 .size atomic64_add_ret, .-atomic64_add_ret
4494
4495+ .globl atomic64_add_ret_unchecked
4496+ .type atomic64_add_ret_unchecked,#function
4497+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4498+ BACKOFF_SETUP(%o2)
4499+1: ldx [%o1], %g1
4500+ addcc %g1, %o0, %g7
4501+ casx [%o1], %g1, %g7
4502+ cmp %g1, %g7
4503+ bne,pn %xcc, 2f
4504+ add %g7, %o0, %g7
4505+ mov %g7, %o0
4506+ retl
4507+ nop
4508+2: BACKOFF_SPIN(%o2, %o3, 1b)
4509+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4510+
4511 .globl atomic64_sub_ret
4512 .type atomic64_sub_ret,#function
4513 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4514 BACKOFF_SETUP(%o2)
4515 1: ldx [%o1], %g1
4516- sub %g1, %o0, %g7
4517+ subcc %g1, %o0, %g7
4518+
4519+#ifdef CONFIG_PAX_REFCOUNT
4520+ tvs %xcc, 6
4521+#endif
4522+
4523 casx [%o1], %g1, %g7
4524 cmp %g1, %g7
4525 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4526diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4527index 1b30bb3..b4a16c7 100644
4528--- a/arch/sparc/lib/ksyms.c
4529+++ b/arch/sparc/lib/ksyms.c
4530@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4531
4532 /* Atomic counter implementation. */
4533 EXPORT_SYMBOL(atomic_add);
4534+EXPORT_SYMBOL(atomic_add_unchecked);
4535 EXPORT_SYMBOL(atomic_add_ret);
4536+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4537 EXPORT_SYMBOL(atomic_sub);
4538+EXPORT_SYMBOL(atomic_sub_unchecked);
4539 EXPORT_SYMBOL(atomic_sub_ret);
4540 EXPORT_SYMBOL(atomic64_add);
4541+EXPORT_SYMBOL(atomic64_add_unchecked);
4542 EXPORT_SYMBOL(atomic64_add_ret);
4543+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4544 EXPORT_SYMBOL(atomic64_sub);
4545+EXPORT_SYMBOL(atomic64_sub_unchecked);
4546 EXPORT_SYMBOL(atomic64_sub_ret);
4547
4548 /* Atomic bit operations. */
4549diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4550index e3cda21..a68e4cb 100644
4551--- a/arch/sparc/mm/Makefile
4552+++ b/arch/sparc/mm/Makefile
4553@@ -2,7 +2,7 @@
4554 #
4555
4556 asflags-y := -ansi
4557-ccflags-y := -Werror
4558+#ccflags-y := -Werror
4559
4560 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4561 obj-y += fault_$(BITS).o
4562diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4563index aa1c1b1..f93e28f 100644
4564--- a/arch/sparc/mm/fault_32.c
4565+++ b/arch/sparc/mm/fault_32.c
4566@@ -22,6 +22,9 @@
4567 #include <linux/interrupt.h>
4568 #include <linux/module.h>
4569 #include <linux/kdebug.h>
4570+#include <linux/slab.h>
4571+#include <linux/pagemap.h>
4572+#include <linux/compiler.h>
4573
4574 #include <asm/system.h>
4575 #include <asm/page.h>
4576@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4577 return safe_compute_effective_address(regs, insn);
4578 }
4579
4580+#ifdef CONFIG_PAX_PAGEEXEC
4581+#ifdef CONFIG_PAX_DLRESOLVE
4582+static void pax_emuplt_close(struct vm_area_struct *vma)
4583+{
4584+ vma->vm_mm->call_dl_resolve = 0UL;
4585+}
4586+
4587+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4588+{
4589+ unsigned int *kaddr;
4590+
4591+ vmf->page = alloc_page(GFP_HIGHUSER);
4592+ if (!vmf->page)
4593+ return VM_FAULT_OOM;
4594+
4595+ kaddr = kmap(vmf->page);
4596+ memset(kaddr, 0, PAGE_SIZE);
4597+ kaddr[0] = 0x9DE3BFA8U; /* save */
4598+ flush_dcache_page(vmf->page);
4599+ kunmap(vmf->page);
4600+ return VM_FAULT_MAJOR;
4601+}
4602+
4603+static const struct vm_operations_struct pax_vm_ops = {
4604+ .close = pax_emuplt_close,
4605+ .fault = pax_emuplt_fault
4606+};
4607+
4608+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4609+{
4610+ int ret;
4611+
4612+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4613+ vma->vm_mm = current->mm;
4614+ vma->vm_start = addr;
4615+ vma->vm_end = addr + PAGE_SIZE;
4616+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4617+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4618+ vma->vm_ops = &pax_vm_ops;
4619+
4620+ ret = insert_vm_struct(current->mm, vma);
4621+ if (ret)
4622+ return ret;
4623+
4624+ ++current->mm->total_vm;
4625+ return 0;
4626+}
4627+#endif
4628+
4629+/*
4630+ * PaX: decide what to do with offenders (regs->pc = fault address)
4631+ *
4632+ * returns 1 when task should be killed
4633+ * 2 when patched PLT trampoline was detected
4634+ * 3 when unpatched PLT trampoline was detected
4635+ */
4636+static int pax_handle_fetch_fault(struct pt_regs *regs)
4637+{
4638+
4639+#ifdef CONFIG_PAX_EMUPLT
4640+ int err;
4641+
4642+ do { /* PaX: patched PLT emulation #1 */
4643+ unsigned int sethi1, sethi2, jmpl;
4644+
4645+ err = get_user(sethi1, (unsigned int *)regs->pc);
4646+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4647+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4648+
4649+ if (err)
4650+ break;
4651+
4652+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4653+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4654+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4655+ {
4656+ unsigned int addr;
4657+
4658+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4659+ addr = regs->u_regs[UREG_G1];
4660+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4661+ regs->pc = addr;
4662+ regs->npc = addr+4;
4663+ return 2;
4664+ }
4665+ } while (0);
4666+
4667+ { /* PaX: patched PLT emulation #2 */
4668+ unsigned int ba;
4669+
4670+ err = get_user(ba, (unsigned int *)regs->pc);
4671+
4672+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4673+ unsigned int addr;
4674+
4675+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4676+ regs->pc = addr;
4677+ regs->npc = addr+4;
4678+ return 2;
4679+ }
4680+ }
4681+
4682+ do { /* PaX: patched PLT emulation #3 */
4683+ unsigned int sethi, jmpl, nop;
4684+
4685+ err = get_user(sethi, (unsigned int *)regs->pc);
4686+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4687+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4688+
4689+ if (err)
4690+ break;
4691+
4692+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4693+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4694+ nop == 0x01000000U)
4695+ {
4696+ unsigned int addr;
4697+
4698+ addr = (sethi & 0x003FFFFFU) << 10;
4699+ regs->u_regs[UREG_G1] = addr;
4700+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4701+ regs->pc = addr;
4702+ regs->npc = addr+4;
4703+ return 2;
4704+ }
4705+ } while (0);
4706+
4707+ do { /* PaX: unpatched PLT emulation step 1 */
4708+ unsigned int sethi, ba, nop;
4709+
4710+ err = get_user(sethi, (unsigned int *)regs->pc);
4711+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4712+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4713+
4714+ if (err)
4715+ break;
4716+
4717+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4718+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4719+ nop == 0x01000000U)
4720+ {
4721+ unsigned int addr, save, call;
4722+
4723+ if ((ba & 0xFFC00000U) == 0x30800000U)
4724+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4725+ else
4726+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4727+
4728+ err = get_user(save, (unsigned int *)addr);
4729+ err |= get_user(call, (unsigned int *)(addr+4));
4730+ err |= get_user(nop, (unsigned int *)(addr+8));
4731+ if (err)
4732+ break;
4733+
4734+#ifdef CONFIG_PAX_DLRESOLVE
4735+ if (save == 0x9DE3BFA8U &&
4736+ (call & 0xC0000000U) == 0x40000000U &&
4737+ nop == 0x01000000U)
4738+ {
4739+ struct vm_area_struct *vma;
4740+ unsigned long call_dl_resolve;
4741+
4742+ down_read(&current->mm->mmap_sem);
4743+ call_dl_resolve = current->mm->call_dl_resolve;
4744+ up_read(&current->mm->mmap_sem);
4745+ if (likely(call_dl_resolve))
4746+ goto emulate;
4747+
4748+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4749+
4750+ down_write(&current->mm->mmap_sem);
4751+ if (current->mm->call_dl_resolve) {
4752+ call_dl_resolve = current->mm->call_dl_resolve;
4753+ up_write(&current->mm->mmap_sem);
4754+ if (vma)
4755+ kmem_cache_free(vm_area_cachep, vma);
4756+ goto emulate;
4757+ }
4758+
4759+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4760+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4761+ up_write(&current->mm->mmap_sem);
4762+ if (vma)
4763+ kmem_cache_free(vm_area_cachep, vma);
4764+ return 1;
4765+ }
4766+
4767+ if (pax_insert_vma(vma, call_dl_resolve)) {
4768+ up_write(&current->mm->mmap_sem);
4769+ kmem_cache_free(vm_area_cachep, vma);
4770+ return 1;
4771+ }
4772+
4773+ current->mm->call_dl_resolve = call_dl_resolve;
4774+ up_write(&current->mm->mmap_sem);
4775+
4776+emulate:
4777+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4778+ regs->pc = call_dl_resolve;
4779+ regs->npc = addr+4;
4780+ return 3;
4781+ }
4782+#endif
4783+
4784+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4785+ if ((save & 0xFFC00000U) == 0x05000000U &&
4786+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4787+ nop == 0x01000000U)
4788+ {
4789+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4790+ regs->u_regs[UREG_G2] = addr + 4;
4791+ addr = (save & 0x003FFFFFU) << 10;
4792+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4793+ regs->pc = addr;
4794+ regs->npc = addr+4;
4795+ return 3;
4796+ }
4797+ }
4798+ } while (0);
4799+
4800+ do { /* PaX: unpatched PLT emulation step 2 */
4801+ unsigned int save, call, nop;
4802+
4803+ err = get_user(save, (unsigned int *)(regs->pc-4));
4804+ err |= get_user(call, (unsigned int *)regs->pc);
4805+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4806+ if (err)
4807+ break;
4808+
4809+ if (save == 0x9DE3BFA8U &&
4810+ (call & 0xC0000000U) == 0x40000000U &&
4811+ nop == 0x01000000U)
4812+ {
4813+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4814+
4815+ regs->u_regs[UREG_RETPC] = regs->pc;
4816+ regs->pc = dl_resolve;
4817+ regs->npc = dl_resolve+4;
4818+ return 3;
4819+ }
4820+ } while (0);
4821+#endif
4822+
4823+ return 1;
4824+}
4825+
4826+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4827+{
4828+ unsigned long i;
4829+
4830+ printk(KERN_ERR "PAX: bytes at PC: ");
4831+	for (i = 0; i < 8; i++) {
4832+ unsigned int c;
4833+ if (get_user(c, (unsigned int *)pc+i))
4834+ printk(KERN_CONT "???????? ");
4835+ else
4836+ printk(KERN_CONT "%08x ", c);
4837+ }
4838+ printk("\n");
4839+}
4840+#endif
4841+
4842 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4843 int text_fault)
4844 {
4845@@ -281,6 +546,24 @@ good_area:
4846 if(!(vma->vm_flags & VM_WRITE))
4847 goto bad_area;
4848 } else {
4849+
4850+#ifdef CONFIG_PAX_PAGEEXEC
4851+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4852+ up_read(&mm->mmap_sem);
4853+ switch (pax_handle_fetch_fault(regs)) {
4854+
4855+#ifdef CONFIG_PAX_EMUPLT
4856+ case 2:
4857+ case 3:
4858+ return;
4859+#endif
4860+
4861+ }
4862+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4863+ do_group_exit(SIGKILL);
4864+ }
4865+#endif
4866+
4867 /* Allow reads even for write-only mappings */
4868 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4869 goto bad_area;
4870diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4871index 504c062..6fcb9c6 100644
4872--- a/arch/sparc/mm/fault_64.c
4873+++ b/arch/sparc/mm/fault_64.c
4874@@ -21,6 +21,9 @@
4875 #include <linux/kprobes.h>
4876 #include <linux/kdebug.h>
4877 #include <linux/percpu.h>
4878+#include <linux/slab.h>
4879+#include <linux/pagemap.h>
4880+#include <linux/compiler.h>
4881
4882 #include <asm/page.h>
4883 #include <asm/pgtable.h>
4884@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4885 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4886 regs->tpc);
4887 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4888- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4889+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4890 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4891 dump_stack();
4892 unhandled_fault(regs->tpc, current, regs);
4893@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4894 show_regs(regs);
4895 }
4896
4897+#ifdef CONFIG_PAX_PAGEEXEC
4898+#ifdef CONFIG_PAX_DLRESOLVE
4899+static void pax_emuplt_close(struct vm_area_struct *vma)
4900+{
4901+ vma->vm_mm->call_dl_resolve = 0UL;
4902+}
4903+
4904+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4905+{
4906+ unsigned int *kaddr;
4907+
4908+ vmf->page = alloc_page(GFP_HIGHUSER);
4909+ if (!vmf->page)
4910+ return VM_FAULT_OOM;
4911+
4912+ kaddr = kmap(vmf->page);
4913+ memset(kaddr, 0, PAGE_SIZE);
4914+ kaddr[0] = 0x9DE3BFA8U; /* save */
4915+ flush_dcache_page(vmf->page);
4916+ kunmap(vmf->page);
4917+ return VM_FAULT_MAJOR;
4918+}
4919+
4920+static const struct vm_operations_struct pax_vm_ops = {
4921+ .close = pax_emuplt_close,
4922+ .fault = pax_emuplt_fault
4923+};
4924+
4925+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4926+{
4927+ int ret;
4928+
4929+	INIT_LIST_HEAD(&vma->anon_vma_chain);
4930+ vma->vm_mm = current->mm;
4931+ vma->vm_start = addr;
4932+ vma->vm_end = addr + PAGE_SIZE;
4933+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4934+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4935+ vma->vm_ops = &pax_vm_ops;
4936+
4937+ ret = insert_vm_struct(current->mm, vma);
4938+ if (ret)
4939+ return ret;
4940+
4941+ ++current->mm->total_vm;
4942+ return 0;
4943+}
4944+#endif
4945+
4946+/*
4947+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4948+ *
4949+ * returns 1 when task should be killed
4950+ * 2 when patched PLT trampoline was detected
4951+ * 3 when unpatched PLT trampoline was detected
4952+ */
4953+static int pax_handle_fetch_fault(struct pt_regs *regs)
4954+{
4955+
4956+#ifdef CONFIG_PAX_EMUPLT
4957+ int err;
4958+
4959+ do { /* PaX: patched PLT emulation #1 */
4960+ unsigned int sethi1, sethi2, jmpl;
4961+
4962+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4963+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4964+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4965+
4966+ if (err)
4967+ break;
4968+
4969+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4970+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4971+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4972+ {
4973+ unsigned long addr;
4974+
4975+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4976+ addr = regs->u_regs[UREG_G1];
4977+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4978+
4979+ if (test_thread_flag(TIF_32BIT))
4980+ addr &= 0xFFFFFFFFUL;
4981+
4982+ regs->tpc = addr;
4983+ regs->tnpc = addr+4;
4984+ return 2;
4985+ }
4986+ } while (0);
4987+
4988+ { /* PaX: patched PLT emulation #2 */
4989+ unsigned int ba;
4990+
4991+ err = get_user(ba, (unsigned int *)regs->tpc);
4992+
4993+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4994+ unsigned long addr;
4995+
4996+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4997+
4998+ if (test_thread_flag(TIF_32BIT))
4999+ addr &= 0xFFFFFFFFUL;
5000+
5001+ regs->tpc = addr;
5002+ regs->tnpc = addr+4;
5003+ return 2;
5004+ }
5005+ }
5006+
5007+ do { /* PaX: patched PLT emulation #3 */
5008+ unsigned int sethi, jmpl, nop;
5009+
5010+ err = get_user(sethi, (unsigned int *)regs->tpc);
5011+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5012+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5013+
5014+ if (err)
5015+ break;
5016+
5017+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5018+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5019+ nop == 0x01000000U)
5020+ {
5021+ unsigned long addr;
5022+
5023+ addr = (sethi & 0x003FFFFFU) << 10;
5024+ regs->u_regs[UREG_G1] = addr;
5025+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5026+
5027+ if (test_thread_flag(TIF_32BIT))
5028+ addr &= 0xFFFFFFFFUL;
5029+
5030+ regs->tpc = addr;
5031+ regs->tnpc = addr+4;
5032+ return 2;
5033+ }
5034+ } while (0);
5035+
5036+ do { /* PaX: patched PLT emulation #4 */
5037+		unsigned int sethi, mov1, call, mov2;
5038+
5039+ err = get_user(sethi, (unsigned int *)regs->tpc);
5040+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5041+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5042+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5043+
5044+ if (err)
5045+ break;
5046+
5047+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5048+ mov1 == 0x8210000FU &&
5049+ (call & 0xC0000000U) == 0x40000000U &&
5050+ mov2 == 0x9E100001U)
5051+ {
5052+ unsigned long addr;
5053+
5054+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5055+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5056+
5057+ if (test_thread_flag(TIF_32BIT))
5058+ addr &= 0xFFFFFFFFUL;
5059+
5060+ regs->tpc = addr;
5061+ regs->tnpc = addr+4;
5062+ return 2;
5063+ }
5064+ } while (0);
5065+
5066+ do { /* PaX: patched PLT emulation #5 */
5067+		unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5068+
5069+ err = get_user(sethi, (unsigned int *)regs->tpc);
5070+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5071+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5072+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5073+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5074+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5075+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5076+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5077+
5078+ if (err)
5079+ break;
5080+
5081+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5082+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5083+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5084+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5085+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5086+		    sllx == 0x83287020U &&
5087+ jmpl == 0x81C04005U &&
5088+ nop == 0x01000000U)
5089+ {
5090+ unsigned long addr;
5091+
5092+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5093+ regs->u_regs[UREG_G1] <<= 32;
5094+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5095+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5096+ regs->tpc = addr;
5097+ regs->tnpc = addr+4;
5098+ return 2;
5099+ }
5100+ } while (0);
5101+
5102+ do { /* PaX: patched PLT emulation #6 */
5103+		unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5104+
5105+ err = get_user(sethi, (unsigned int *)regs->tpc);
5106+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5107+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5108+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5109+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5110+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5111+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5112+
5113+ if (err)
5114+ break;
5115+
5116+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5117+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5118+		    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5119+		    sllx == 0x83287020U &&
5120+ (or & 0xFFFFE000U) == 0x8A116000U &&
5121+ jmpl == 0x81C04005U &&
5122+ nop == 0x01000000U)
5123+ {
5124+ unsigned long addr;
5125+
5126+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5127+ regs->u_regs[UREG_G1] <<= 32;
5128+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5129+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5130+ regs->tpc = addr;
5131+ regs->tnpc = addr+4;
5132+ return 2;
5133+ }
5134+ } while (0);
5135+
5136+ do { /* PaX: unpatched PLT emulation step 1 */
5137+ unsigned int sethi, ba, nop;
5138+
5139+ err = get_user(sethi, (unsigned int *)regs->tpc);
5140+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5141+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5142+
5143+ if (err)
5144+ break;
5145+
5146+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5147+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5148+ nop == 0x01000000U)
5149+ {
5150+ unsigned long addr;
5151+ unsigned int save, call;
5152+			unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5153+
5154+ if ((ba & 0xFFC00000U) == 0x30800000U)
5155+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5156+ else
5157+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5158+
5159+ if (test_thread_flag(TIF_32BIT))
5160+ addr &= 0xFFFFFFFFUL;
5161+
5162+ err = get_user(save, (unsigned int *)addr);
5163+ err |= get_user(call, (unsigned int *)(addr+4));
5164+ err |= get_user(nop, (unsigned int *)(addr+8));
5165+ if (err)
5166+ break;
5167+
5168+#ifdef CONFIG_PAX_DLRESOLVE
5169+ if (save == 0x9DE3BFA8U &&
5170+ (call & 0xC0000000U) == 0x40000000U &&
5171+ nop == 0x01000000U)
5172+ {
5173+ struct vm_area_struct *vma;
5174+ unsigned long call_dl_resolve;
5175+
5176+ down_read(&current->mm->mmap_sem);
5177+ call_dl_resolve = current->mm->call_dl_resolve;
5178+ up_read(&current->mm->mmap_sem);
5179+ if (likely(call_dl_resolve))
5180+ goto emulate;
5181+
5182+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5183+
5184+ down_write(&current->mm->mmap_sem);
5185+ if (current->mm->call_dl_resolve) {
5186+ call_dl_resolve = current->mm->call_dl_resolve;
5187+ up_write(&current->mm->mmap_sem);
5188+ if (vma)
5189+ kmem_cache_free(vm_area_cachep, vma);
5190+ goto emulate;
5191+ }
5192+
5193+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5194+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5195+ up_write(&current->mm->mmap_sem);
5196+ if (vma)
5197+ kmem_cache_free(vm_area_cachep, vma);
5198+ return 1;
5199+ }
5200+
5201+ if (pax_insert_vma(vma, call_dl_resolve)) {
5202+ up_write(&current->mm->mmap_sem);
5203+ kmem_cache_free(vm_area_cachep, vma);
5204+ return 1;
5205+ }
5206+
5207+ current->mm->call_dl_resolve = call_dl_resolve;
5208+ up_write(&current->mm->mmap_sem);
5209+
5210+emulate:
5211+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5212+ regs->tpc = call_dl_resolve;
5213+ regs->tnpc = addr+4;
5214+ return 3;
5215+ }
5216+#endif
5217+
5218+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5219+ if ((save & 0xFFC00000U) == 0x05000000U &&
5220+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5221+ nop == 0x01000000U)
5222+ {
5223+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5224+ regs->u_regs[UREG_G2] = addr + 4;
5225+ addr = (save & 0x003FFFFFU) << 10;
5226+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5227+
5228+ if (test_thread_flag(TIF_32BIT))
5229+ addr &= 0xFFFFFFFFUL;
5230+
5231+ regs->tpc = addr;
5232+ regs->tnpc = addr+4;
5233+ return 3;
5234+ }
5235+
5236+ /* PaX: 64-bit PLT stub */
5237+ err = get_user(sethi1, (unsigned int *)addr);
5238+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5239+ err |= get_user(or1, (unsigned int *)(addr+8));
5240+ err |= get_user(or2, (unsigned int *)(addr+12));
5241+ err |= get_user(sllx, (unsigned int *)(addr+16));
5242+ err |= get_user(add, (unsigned int *)(addr+20));
5243+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5244+ err |= get_user(nop, (unsigned int *)(addr+28));
5245+ if (err)
5246+ break;
5247+
5248+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5249+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5250+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5251+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5252+ sllx == 0x89293020U &&
5253+ add == 0x8A010005U &&
5254+ jmpl == 0x89C14000U &&
5255+ nop == 0x01000000U)
5256+ {
5257+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5258+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5259+ regs->u_regs[UREG_G4] <<= 32;
5260+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5261+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5262+ regs->u_regs[UREG_G4] = addr + 24;
5263+ addr = regs->u_regs[UREG_G5];
5264+ regs->tpc = addr;
5265+ regs->tnpc = addr+4;
5266+ return 3;
5267+ }
5268+ }
5269+ } while (0);
5270+
5271+#ifdef CONFIG_PAX_DLRESOLVE
5272+ do { /* PaX: unpatched PLT emulation step 2 */
5273+ unsigned int save, call, nop;
5274+
5275+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5276+ err |= get_user(call, (unsigned int *)regs->tpc);
5277+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5278+ if (err)
5279+ break;
5280+
5281+ if (save == 0x9DE3BFA8U &&
5282+ (call & 0xC0000000U) == 0x40000000U &&
5283+ nop == 0x01000000U)
5284+ {
5285+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5286+
5287+ if (test_thread_flag(TIF_32BIT))
5288+ dl_resolve &= 0xFFFFFFFFUL;
5289+
5290+ regs->u_regs[UREG_RETPC] = regs->tpc;
5291+ regs->tpc = dl_resolve;
5292+ regs->tnpc = dl_resolve+4;
5293+ return 3;
5294+ }
5295+ } while (0);
5296+#endif
5297+
5298+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5299+ unsigned int sethi, ba, nop;
5300+
5301+ err = get_user(sethi, (unsigned int *)regs->tpc);
5302+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5303+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5304+
5305+ if (err)
5306+ break;
5307+
5308+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5309+ (ba & 0xFFF00000U) == 0x30600000U &&
5310+ nop == 0x01000000U)
5311+ {
5312+ unsigned long addr;
5313+
5314+ addr = (sethi & 0x003FFFFFU) << 10;
5315+ regs->u_regs[UREG_G1] = addr;
5316+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5317+
5318+ if (test_thread_flag(TIF_32BIT))
5319+ addr &= 0xFFFFFFFFUL;
5320+
5321+ regs->tpc = addr;
5322+ regs->tnpc = addr+4;
5323+ return 2;
5324+ }
5325+ } while (0);
5326+
5327+#endif
5328+
5329+ return 1;
5330+}
5331+
5332+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5333+{
5334+ unsigned long i;
5335+
5336+ printk(KERN_ERR "PAX: bytes at PC: ");
5337+	for (i = 0; i < 8; i++) {
5338+ unsigned int c;
5339+ if (get_user(c, (unsigned int *)pc+i))
5340+ printk(KERN_CONT "???????? ");
5341+ else
5342+ printk(KERN_CONT "%08x ", c);
5343+ }
5344+ printk("\n");
5345+}
5346+#endif
5347+
5348 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5349 {
5350 struct mm_struct *mm = current->mm;
5351@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5352 if (!vma)
5353 goto bad_area;
5354
5355+#ifdef CONFIG_PAX_PAGEEXEC
5356+ /* PaX: detect ITLB misses on non-exec pages */
5357+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5358+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5359+ {
5360+ if (address != regs->tpc)
5361+ goto good_area;
5362+
5363+ up_read(&mm->mmap_sem);
5364+ switch (pax_handle_fetch_fault(regs)) {
5365+
5366+#ifdef CONFIG_PAX_EMUPLT
5367+ case 2:
5368+ case 3:
5369+ return;
5370+#endif
5371+
5372+ }
5373+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5374+ do_group_exit(SIGKILL);
5375+ }
5376+#endif
5377+
5378 /* Pure DTLB misses do not tell us whether the fault causing
5379 * load/store/atomic was a write or not, it only says that there
5380 * was no match. So in such a case we (carefully) read the
5381diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5382index f4e9764..5682724 100644
5383--- a/arch/sparc/mm/hugetlbpage.c
5384+++ b/arch/sparc/mm/hugetlbpage.c
5385@@ -68,7 +68,7 @@ full_search:
5386 }
5387 return -ENOMEM;
5388 }
5389- if (likely(!vma || addr + len <= vma->vm_start)) {
5390+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5391 /*
5392 * Remember the place where we stopped the search:
5393 */
5394@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5395 /* make sure it can fit in the remaining address space */
5396 if (likely(addr > len)) {
5397 vma = find_vma(mm, addr-len);
5398- if (!vma || addr <= vma->vm_start) {
5399+ if (check_heap_stack_gap(vma, addr - len, len)) {
5400 /* remember the address as a hint for next time */
5401 return (mm->free_area_cache = addr-len);
5402 }
5403@@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5404 if (unlikely(mm->mmap_base < len))
5405 goto bottomup;
5406
5407- addr = (mm->mmap_base-len) & HPAGE_MASK;
5408+ addr = mm->mmap_base - len;
5409
5410 do {
5411+ addr &= HPAGE_MASK;
5412 /*
5413 * Lookup failure means no vma is above this address,
5414 * else if new region fits below vma->vm_start,
5415 * return with success:
5416 */
5417 vma = find_vma(mm, addr);
5418- if (likely(!vma || addr+len <= vma->vm_start)) {
5419+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5420 /* remember the address as a hint for next time */
5421 return (mm->free_area_cache = addr);
5422 }
5423@@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5424 mm->cached_hole_size = vma->vm_start - addr;
5425
5426 /* try just below the current vma->vm_start */
5427- addr = (vma->vm_start-len) & HPAGE_MASK;
5428- } while (likely(len < vma->vm_start));
5429+ addr = skip_heap_stack_gap(vma, len);
5430+ } while (!IS_ERR_VALUE(addr));
5431
5432 bottomup:
5433 /*
5434@@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5435 if (addr) {
5436 addr = ALIGN(addr, HPAGE_SIZE);
5437 vma = find_vma(mm, addr);
5438- if (task_size - len >= addr &&
5439- (!vma || addr + len <= vma->vm_start))
5440+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5441 return addr;
5442 }
5443 if (mm->get_unmapped_area == arch_get_unmapped_area)
5444diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5445index 7b00de6..78239f4 100644
5446--- a/arch/sparc/mm/init_32.c
5447+++ b/arch/sparc/mm/init_32.c
5448@@ -316,6 +316,9 @@ extern void device_scan(void);
5449 pgprot_t PAGE_SHARED __read_mostly;
5450 EXPORT_SYMBOL(PAGE_SHARED);
5451
5452+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5453+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5454+
5455 void __init paging_init(void)
5456 {
5457 switch(sparc_cpu_model) {
5458@@ -344,17 +347,17 @@ void __init paging_init(void)
5459
5460 /* Initialize the protection map with non-constant, MMU dependent values. */
5461 protection_map[0] = PAGE_NONE;
5462- protection_map[1] = PAGE_READONLY;
5463- protection_map[2] = PAGE_COPY;
5464- protection_map[3] = PAGE_COPY;
5465+ protection_map[1] = PAGE_READONLY_NOEXEC;
5466+ protection_map[2] = PAGE_COPY_NOEXEC;
5467+ protection_map[3] = PAGE_COPY_NOEXEC;
5468 protection_map[4] = PAGE_READONLY;
5469 protection_map[5] = PAGE_READONLY;
5470 protection_map[6] = PAGE_COPY;
5471 protection_map[7] = PAGE_COPY;
5472 protection_map[8] = PAGE_NONE;
5473- protection_map[9] = PAGE_READONLY;
5474- protection_map[10] = PAGE_SHARED;
5475- protection_map[11] = PAGE_SHARED;
5476+ protection_map[9] = PAGE_READONLY_NOEXEC;
5477+ protection_map[10] = PAGE_SHARED_NOEXEC;
5478+ protection_map[11] = PAGE_SHARED_NOEXEC;
5479 protection_map[12] = PAGE_READONLY;
5480 protection_map[13] = PAGE_READONLY;
5481 protection_map[14] = PAGE_SHARED;
5482diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5483index cbef74e..c38fead 100644
5484--- a/arch/sparc/mm/srmmu.c
5485+++ b/arch/sparc/mm/srmmu.c
5486@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5487 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5488 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5489 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5490+
5491+#ifdef CONFIG_PAX_PAGEEXEC
5492+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5493+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5494+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5495+#endif
5496+
5497 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5498 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5499
5500diff --git a/arch/um/Makefile b/arch/um/Makefile
5501index c0f712c..3a5c4c9 100644
5502--- a/arch/um/Makefile
5503+++ b/arch/um/Makefile
5504@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5505 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5506 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5507
5508+ifdef CONSTIFY_PLUGIN
5509+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5510+endif
5511+
5512 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5513
5514 #This will adjust *FLAGS accordingly to the platform.
5515diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5516index 6c03acd..a5e0215 100644
5517--- a/arch/um/include/asm/kmap_types.h
5518+++ b/arch/um/include/asm/kmap_types.h
5519@@ -23,6 +23,7 @@ enum km_type {
5520 KM_IRQ1,
5521 KM_SOFTIRQ0,
5522 KM_SOFTIRQ1,
5523+ KM_CLEARPAGE,
5524 KM_TYPE_NR
5525 };
5526
5527diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5528index 4cc9b6c..02e5029 100644
5529--- a/arch/um/include/asm/page.h
5530+++ b/arch/um/include/asm/page.h
5531@@ -14,6 +14,9 @@
5532 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5533 #define PAGE_MASK (~(PAGE_SIZE-1))
5534
5535+#define ktla_ktva(addr) (addr)
5536+#define ktva_ktla(addr) (addr)
5537+
5538 #ifndef __ASSEMBLY__
5539
5540 struct page;
5541diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5542index 21c1ae7..4640aaa 100644
5543--- a/arch/um/kernel/process.c
5544+++ b/arch/um/kernel/process.c
5545@@ -404,22 +404,6 @@ int singlestepping(void * t)
5546 return 2;
5547 }
5548
5549-/*
5550- * Only x86 and x86_64 have an arch_align_stack().
5551- * All other arches have "#define arch_align_stack(x) (x)"
5552- * in their asm/system.h
5553- * As this is included in UML from asm-um/system-generic.h,
5554- * we can use it to behave as the subarch does.
5555- */
5556-#ifndef arch_align_stack
5557-unsigned long arch_align_stack(unsigned long sp)
5558-{
5559- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5560- sp -= get_random_int() % 8192;
5561- return sp & ~0xf;
5562-}
5563-#endif
5564-
5565 unsigned long get_wchan(struct task_struct *p)
5566 {
5567 unsigned long stack_page, sp, ip;
5568diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
5569index d1b93c4..ae1b7fd 100644
5570--- a/arch/um/sys-i386/shared/sysdep/system.h
5571+++ b/arch/um/sys-i386/shared/sysdep/system.h
5572@@ -17,7 +17,7 @@
5573 # define AT_VECTOR_SIZE_ARCH 1
5574 #endif
5575
5576-extern unsigned long arch_align_stack(unsigned long sp);
5577+#define arch_align_stack(x) ((x) & ~0xfUL)
5578
5579 void default_idle(void);
5580
5581diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
5582index 70ca357..728d1cc 100644
5583--- a/arch/um/sys-i386/syscalls.c
5584+++ b/arch/um/sys-i386/syscalls.c
5585@@ -11,6 +11,21 @@
5586 #include "asm/uaccess.h"
5587 #include "asm/unistd.h"
5588
5589+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5590+{
5591+ unsigned long pax_task_size = TASK_SIZE;
5592+
5593+#ifdef CONFIG_PAX_SEGMEXEC
5594+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5595+ pax_task_size = SEGMEXEC_TASK_SIZE;
5596+#endif
5597+
5598+ if (len > pax_task_size || addr > pax_task_size - len)
5599+ return -EINVAL;
5600+
5601+ return 0;
5602+}
5603+
5604 /*
5605 * The prototype on i386 is:
5606 *
5607diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
5608index d1b93c4..ae1b7fd 100644
5609--- a/arch/um/sys-x86_64/shared/sysdep/system.h
5610+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
5611@@ -17,7 +17,7 @@
5612 # define AT_VECTOR_SIZE_ARCH 1
5613 #endif
5614
5615-extern unsigned long arch_align_stack(unsigned long sp);
5616+#define arch_align_stack(x) ((x) & ~0xfUL)
5617
5618 void default_idle(void);
5619
5620diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5621index 6a47bb2..dc9a868 100644
5622--- a/arch/x86/Kconfig
5623+++ b/arch/x86/Kconfig
5624@@ -236,7 +236,7 @@ config X86_HT
5625
5626 config X86_32_LAZY_GS
5627 def_bool y
5628- depends on X86_32 && !CC_STACKPROTECTOR
5629+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5630
5631 config ARCH_HWEIGHT_CFLAGS
5632 string
5633@@ -1019,7 +1019,7 @@ choice
5634
5635 config NOHIGHMEM
5636 bool "off"
5637- depends on !X86_NUMAQ
5638+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5639 ---help---
5640 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5641 However, the address space of 32-bit x86 processors is only 4
5642@@ -1056,7 +1056,7 @@ config NOHIGHMEM
5643
5644 config HIGHMEM4G
5645 bool "4GB"
5646- depends on !X86_NUMAQ
5647+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5648 ---help---
5649 Select this if you have a 32-bit processor and between 1 and 4
5650 gigabytes of physical RAM.
5651@@ -1110,7 +1110,7 @@ config PAGE_OFFSET
5652 hex
5653 default 0xB0000000 if VMSPLIT_3G_OPT
5654 default 0x80000000 if VMSPLIT_2G
5655- default 0x78000000 if VMSPLIT_2G_OPT
5656+ default 0x70000000 if VMSPLIT_2G_OPT
5657 default 0x40000000 if VMSPLIT_1G
5658 default 0xC0000000
5659 depends on X86_32
5660@@ -1484,6 +1484,7 @@ config SECCOMP
5661
5662 config CC_STACKPROTECTOR
5663 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5664+ depends on X86_64 || !PAX_MEMORY_UDEREF
5665 ---help---
5666 This option turns on the -fstack-protector GCC feature. This
5667 feature puts, at the beginning of functions, a canary value on
5668@@ -1541,6 +1542,7 @@ config KEXEC_JUMP
5669 config PHYSICAL_START
5670 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5671 default "0x1000000"
5672+ range 0x400000 0x40000000
5673 ---help---
5674 This gives the physical address where the kernel is loaded.
5675
5676@@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
5677 config PHYSICAL_ALIGN
5678 hex "Alignment value to which kernel should be aligned" if X86_32
5679 default "0x1000000"
5680+ range 0x400000 0x1000000 if PAX_KERNEXEC
5681 range 0x2000 0x1000000
5682 ---help---
5683 This value puts the alignment restrictions on physical address
5684@@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
5685 Say N if you want to disable CPU hotplug.
5686
5687 config COMPAT_VDSO
5688- def_bool y
5689+ def_bool n
5690 prompt "Compat VDSO support"
5691 depends on X86_32 || IA32_EMULATION
5692+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5693 ---help---
5694 Map the 32-bit VDSO to the predictable old-style address too.
5695
5696diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5697index e3ca7e0..b30b28a 100644
5698--- a/arch/x86/Kconfig.cpu
5699+++ b/arch/x86/Kconfig.cpu
5700@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5701
5702 config X86_F00F_BUG
5703 def_bool y
5704- depends on M586MMX || M586TSC || M586 || M486 || M386
5705+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5706
5707 config X86_INVD_BUG
5708 def_bool y
5709@@ -365,7 +365,7 @@ config X86_POPAD_OK
5710
5711 config X86_ALIGNMENT_16
5712 def_bool y
5713- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5714+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5715
5716 config X86_INTEL_USERCOPY
5717 def_bool y
5718@@ -411,7 +411,7 @@ config X86_CMPXCHG64
5719 # generates cmov.
5720 config X86_CMOV
5721 def_bool y
5722- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5723+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5724
5725 config X86_MINIMUM_CPU_FAMILY
5726 int
5727diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5728index c0f8a5c..6404f61 100644
5729--- a/arch/x86/Kconfig.debug
5730+++ b/arch/x86/Kconfig.debug
5731@@ -81,7 +81,7 @@ config X86_PTDUMP
5732 config DEBUG_RODATA
5733 bool "Write protect kernel read-only data structures"
5734 default y
5735- depends on DEBUG_KERNEL
5736+ depends on DEBUG_KERNEL && BROKEN
5737 ---help---
5738 Mark the kernel read-only data as write-protected in the pagetables,
5739 in order to catch accidental (and incorrect) writes to such const
5740@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5741
5742 config DEBUG_SET_MODULE_RONX
5743 bool "Set loadable kernel module data as NX and text as RO"
5744- depends on MODULES
5745+ depends on MODULES && BROKEN
5746 ---help---
5747 This option helps catch unintended modifications to loadable
5748 kernel module's text and read-only data. It also prevents execution
5749diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5750index b02e509..2631e48 100644
5751--- a/arch/x86/Makefile
5752+++ b/arch/x86/Makefile
5753@@ -46,6 +46,7 @@ else
5754 UTS_MACHINE := x86_64
5755 CHECKFLAGS += -D__x86_64__ -m64
5756
5757+ biarch := $(call cc-option,-m64)
5758 KBUILD_AFLAGS += -m64
5759 KBUILD_CFLAGS += -m64
5760
5761@@ -195,3 +196,12 @@ define archhelp
5762 echo ' FDARGS="..." arguments for the booted kernel'
5763 echo ' FDINITRD=file initrd for the booted kernel'
5764 endef
5765+
5766+define OLD_LD
5767+
5768+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5769+*** Please upgrade your binutils to 2.18 or newer
5770+endef
5771+
5772+archprepare:
5773+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5774diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5775index 95365a8..52f857b 100644
5776--- a/arch/x86/boot/Makefile
5777+++ b/arch/x86/boot/Makefile
5778@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5779 $(call cc-option, -fno-stack-protector) \
5780 $(call cc-option, -mpreferred-stack-boundary=2)
5781 KBUILD_CFLAGS += $(call cc-option, -m32)
5782+ifdef CONSTIFY_PLUGIN
5783+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5784+endif
5785 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5786 GCOV_PROFILE := n
5787
5788diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5789index 878e4b9..20537ab 100644
5790--- a/arch/x86/boot/bitops.h
5791+++ b/arch/x86/boot/bitops.h
5792@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5793 u8 v;
5794 const u32 *p = (const u32 *)addr;
5795
5796- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5797+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5798 return v;
5799 }
5800
5801@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5802
5803 static inline void set_bit(int nr, void *addr)
5804 {
5805- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5806+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5807 }
5808
5809 #endif /* BOOT_BITOPS_H */
5810diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5811index c7093bd..d4247ffe0 100644
5812--- a/arch/x86/boot/boot.h
5813+++ b/arch/x86/boot/boot.h
5814@@ -85,7 +85,7 @@ static inline void io_delay(void)
5815 static inline u16 ds(void)
5816 {
5817 u16 seg;
5818- asm("movw %%ds,%0" : "=rm" (seg));
5819+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5820 return seg;
5821 }
5822
5823@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5824 static inline int memcmp(const void *s1, const void *s2, size_t len)
5825 {
5826 u8 diff;
5827- asm("repe; cmpsb; setnz %0"
5828+ asm volatile("repe; cmpsb; setnz %0"
5829 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5830 return diff;
5831 }
5832diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5833index 09664ef..edc5d03 100644
5834--- a/arch/x86/boot/compressed/Makefile
5835+++ b/arch/x86/boot/compressed/Makefile
5836@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5837 KBUILD_CFLAGS += $(cflags-y)
5838 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5839 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5840+ifdef CONSTIFY_PLUGIN
5841+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5842+endif
5843
5844 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5845 GCOV_PROFILE := n
5846diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5847index 67a655a..b924059 100644
5848--- a/arch/x86/boot/compressed/head_32.S
5849+++ b/arch/x86/boot/compressed/head_32.S
5850@@ -76,7 +76,7 @@ ENTRY(startup_32)
5851 notl %eax
5852 andl %eax, %ebx
5853 #else
5854- movl $LOAD_PHYSICAL_ADDR, %ebx
5855+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5856 #endif
5857
5858 /* Target address to relocate to for decompression */
5859@@ -162,7 +162,7 @@ relocated:
5860 * and where it was actually loaded.
5861 */
5862 movl %ebp, %ebx
5863- subl $LOAD_PHYSICAL_ADDR, %ebx
5864+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5865 jz 2f /* Nothing to be done if loaded at compiled addr. */
5866 /*
5867 * Process relocations.
5868@@ -170,8 +170,7 @@ relocated:
5869
5870 1: subl $4, %edi
5871 movl (%edi), %ecx
5872- testl %ecx, %ecx
5873- jz 2f
5874+ jecxz 2f
5875 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5876 jmp 1b
5877 2:
5878diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5879index 35af09d..99c9676 100644
5880--- a/arch/x86/boot/compressed/head_64.S
5881+++ b/arch/x86/boot/compressed/head_64.S
5882@@ -91,7 +91,7 @@ ENTRY(startup_32)
5883 notl %eax
5884 andl %eax, %ebx
5885 #else
5886- movl $LOAD_PHYSICAL_ADDR, %ebx
5887+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5888 #endif
5889
5890 /* Target address to relocate to for decompression */
5891@@ -233,7 +233,7 @@ ENTRY(startup_64)
5892 notq %rax
5893 andq %rax, %rbp
5894 #else
5895- movq $LOAD_PHYSICAL_ADDR, %rbp
5896+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5897 #endif
5898
5899 /* Target address to relocate to for decompression */
5900diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5901index 3a19d04..7c1d55a 100644
5902--- a/arch/x86/boot/compressed/misc.c
5903+++ b/arch/x86/boot/compressed/misc.c
5904@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5905 case PT_LOAD:
5906 #ifdef CONFIG_RELOCATABLE
5907 dest = output;
5908- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5909+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5910 #else
5911 dest = (void *)(phdr->p_paddr);
5912 #endif
5913@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5914 error("Destination address too large");
5915 #endif
5916 #ifndef CONFIG_RELOCATABLE
5917- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5918+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5919 error("Wrong destination address");
5920 #endif
5921
5922diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5923index 89bbf4e..869908e 100644
5924--- a/arch/x86/boot/compressed/relocs.c
5925+++ b/arch/x86/boot/compressed/relocs.c
5926@@ -13,8 +13,11 @@
5927
5928 static void die(char *fmt, ...);
5929
5930+#include "../../../../include/generated/autoconf.h"
5931+
5932 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5933 static Elf32_Ehdr ehdr;
5934+static Elf32_Phdr *phdr;
5935 static unsigned long reloc_count, reloc_idx;
5936 static unsigned long *relocs;
5937
5938@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5939 }
5940 }
5941
5942+static void read_phdrs(FILE *fp)
5943+{
5944+ unsigned int i;
5945+
5946+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5947+ if (!phdr) {
5948+ die("Unable to allocate %d program headers\n",
5949+ ehdr.e_phnum);
5950+ }
5951+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5952+ die("Seek to %d failed: %s\n",
5953+ ehdr.e_phoff, strerror(errno));
5954+ }
5955+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5956+ die("Cannot read ELF program headers: %s\n",
5957+ strerror(errno));
5958+ }
5959+ for(i = 0; i < ehdr.e_phnum; i++) {
5960+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5961+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5962+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5963+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5964+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5965+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5966+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5967+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5968+ }
5969+
5970+}
5971+
5972 static void read_shdrs(FILE *fp)
5973 {
5974- int i;
5975+ unsigned int i;
5976 Elf32_Shdr shdr;
5977
5978 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5979@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5980
5981 static void read_strtabs(FILE *fp)
5982 {
5983- int i;
5984+ unsigned int i;
5985 for (i = 0; i < ehdr.e_shnum; i++) {
5986 struct section *sec = &secs[i];
5987 if (sec->shdr.sh_type != SHT_STRTAB) {
5988@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5989
5990 static void read_symtabs(FILE *fp)
5991 {
5992- int i,j;
5993+ unsigned int i,j;
5994 for (i = 0; i < ehdr.e_shnum; i++) {
5995 struct section *sec = &secs[i];
5996 if (sec->shdr.sh_type != SHT_SYMTAB) {
5997@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5998
5999 static void read_relocs(FILE *fp)
6000 {
6001- int i,j;
6002+ unsigned int i,j;
6003+ uint32_t base;
6004+
6005 for (i = 0; i < ehdr.e_shnum; i++) {
6006 struct section *sec = &secs[i];
6007 if (sec->shdr.sh_type != SHT_REL) {
6008@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
6009 die("Cannot read symbol table: %s\n",
6010 strerror(errno));
6011 }
6012+ base = 0;
6013+ for (j = 0; j < ehdr.e_phnum; j++) {
6014+ if (phdr[j].p_type != PT_LOAD )
6015+ continue;
6016+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6017+ continue;
6018+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6019+ break;
6020+ }
6021 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6022 Elf32_Rel *rel = &sec->reltab[j];
6023- rel->r_offset = elf32_to_cpu(rel->r_offset);
6024+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6025 rel->r_info = elf32_to_cpu(rel->r_info);
6026 }
6027 }
6028@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6029
6030 static void print_absolute_symbols(void)
6031 {
6032- int i;
6033+ unsigned int i;
6034 printf("Absolute symbols\n");
6035 printf(" Num: Value Size Type Bind Visibility Name\n");
6036 for (i = 0; i < ehdr.e_shnum; i++) {
6037 struct section *sec = &secs[i];
6038 char *sym_strtab;
6039 Elf32_Sym *sh_symtab;
6040- int j;
6041+ unsigned int j;
6042
6043 if (sec->shdr.sh_type != SHT_SYMTAB) {
6044 continue;
6045@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6046
6047 static void print_absolute_relocs(void)
6048 {
6049- int i, printed = 0;
6050+ unsigned int i, printed = 0;
6051
6052 for (i = 0; i < ehdr.e_shnum; i++) {
6053 struct section *sec = &secs[i];
6054 struct section *sec_applies, *sec_symtab;
6055 char *sym_strtab;
6056 Elf32_Sym *sh_symtab;
6057- int j;
6058+ unsigned int j;
6059 if (sec->shdr.sh_type != SHT_REL) {
6060 continue;
6061 }
6062@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6063
6064 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6065 {
6066- int i;
6067+ unsigned int i;
6068 /* Walk through the relocations */
6069 for (i = 0; i < ehdr.e_shnum; i++) {
6070 char *sym_strtab;
6071 Elf32_Sym *sh_symtab;
6072 struct section *sec_applies, *sec_symtab;
6073- int j;
6074+ unsigned int j;
6075 struct section *sec = &secs[i];
6076
6077 if (sec->shdr.sh_type != SHT_REL) {
6078@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6079 		    !is_rel_reloc(sym_name(sym_strtab, sym))) {
6080 continue;
6081 }
6082+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6083+		if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6084+ continue;
6085+
6086+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6087+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6088+		if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6089+			continue;
6090+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6091+ continue;
6092+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6093+ continue;
6094+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6095+ continue;
6096+#endif
6097+
6098 switch (r_type) {
6099 case R_386_NONE:
6100 case R_386_PC32:
6101@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6102
6103 static void emit_relocs(int as_text)
6104 {
6105- int i;
6106+ unsigned int i;
6107 /* Count how many relocations I have and allocate space for them. */
6108 reloc_count = 0;
6109 walk_relocs(count_reloc);
6110@@ -665,6 +725,7 @@ int main(int argc, char **argv)
6111 fname, strerror(errno));
6112 }
6113 read_ehdr(fp);
6114+ read_phdrs(fp);
6115 read_shdrs(fp);
6116 read_strtabs(fp);
6117 read_symtabs(fp);
6118diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6119index 4d3ff03..e4972ff 100644
6120--- a/arch/x86/boot/cpucheck.c
6121+++ b/arch/x86/boot/cpucheck.c
6122@@ -74,7 +74,7 @@ static int has_fpu(void)
6123 u16 fcw = -1, fsw = -1;
6124 u32 cr0;
6125
6126- asm("movl %%cr0,%0" : "=r" (cr0));
6127+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6128 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6129 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6130 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6131@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6132 {
6133 u32 f0, f1;
6134
6135- asm("pushfl ; "
6136+ asm volatile("pushfl ; "
6137 "pushfl ; "
6138 "popl %0 ; "
6139 "movl %0,%1 ; "
6140@@ -115,7 +115,7 @@ static void get_flags(void)
6141 set_bit(X86_FEATURE_FPU, cpu.flags);
6142
6143 if (has_eflag(X86_EFLAGS_ID)) {
6144- asm("cpuid"
6145+ asm volatile("cpuid"
6146 : "=a" (max_intel_level),
6147 "=b" (cpu_vendor[0]),
6148 "=d" (cpu_vendor[1]),
6149@@ -124,7 +124,7 @@ static void get_flags(void)
6150
6151 if (max_intel_level >= 0x00000001 &&
6152 max_intel_level <= 0x0000ffff) {
6153- asm("cpuid"
6154+ asm volatile("cpuid"
6155 : "=a" (tfms),
6156 "=c" (cpu.flags[4]),
6157 "=d" (cpu.flags[0])
6158@@ -136,7 +136,7 @@ static void get_flags(void)
6159 cpu.model += ((tfms >> 16) & 0xf) << 4;
6160 }
6161
6162- asm("cpuid"
6163+ asm volatile("cpuid"
6164 : "=a" (max_amd_level)
6165 : "a" (0x80000000)
6166 : "ebx", "ecx", "edx");
6167@@ -144,7 +144,7 @@ static void get_flags(void)
6168 if (max_amd_level >= 0x80000001 &&
6169 max_amd_level <= 0x8000ffff) {
6170 u32 eax = 0x80000001;
6171- asm("cpuid"
6172+ asm volatile("cpuid"
6173 : "+a" (eax),
6174 "=c" (cpu.flags[6]),
6175 "=d" (cpu.flags[1])
6176@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6177 u32 ecx = MSR_K7_HWCR;
6178 u32 eax, edx;
6179
6180- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6181+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6182 eax &= ~(1 << 15);
6183- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6184+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6185
6186 get_flags(); /* Make sure it really did something */
6187 err = check_flags();
6188@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6189 u32 ecx = MSR_VIA_FCR;
6190 u32 eax, edx;
6191
6192- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6193+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6194 eax |= (1<<1)|(1<<7);
6195- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6196+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6197
6198 set_bit(X86_FEATURE_CX8, cpu.flags);
6199 err = check_flags();
6200@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6201 u32 eax, edx;
6202 u32 level = 1;
6203
6204- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6205- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6206- asm("cpuid"
6207+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6208+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6209+ asm volatile("cpuid"
6210 : "+a" (level), "=d" (cpu.flags[0])
6211 : : "ecx", "ebx");
6212- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6213+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6214
6215 err = check_flags();
6216 }
6217diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6218index 93e689f..504ba09 100644
6219--- a/arch/x86/boot/header.S
6220+++ b/arch/x86/boot/header.S
6221@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6222 # single linked list of
6223 # struct setup_data
6224
6225-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6226+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6227
6228 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6229 #define VO_INIT_SIZE (VO__end - VO__text)
6230diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6231index db75d07..8e6d0af 100644
6232--- a/arch/x86/boot/memory.c
6233+++ b/arch/x86/boot/memory.c
6234@@ -19,7 +19,7 @@
6235
6236 static int detect_memory_e820(void)
6237 {
6238- int count = 0;
6239+ unsigned int count = 0;
6240 struct biosregs ireg, oreg;
6241 struct e820entry *desc = boot_params.e820_map;
6242 static struct e820entry buf; /* static so it is zeroed */
6243diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6244index 11e8c6e..fdbb1ed 100644
6245--- a/arch/x86/boot/video-vesa.c
6246+++ b/arch/x86/boot/video-vesa.c
6247@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6248
6249 boot_params.screen_info.vesapm_seg = oreg.es;
6250 boot_params.screen_info.vesapm_off = oreg.di;
6251+ boot_params.screen_info.vesapm_size = oreg.cx;
6252 }
6253
6254 /*
6255diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6256index 43eda28..5ab5fdb 100644
6257--- a/arch/x86/boot/video.c
6258+++ b/arch/x86/boot/video.c
6259@@ -96,7 +96,7 @@ static void store_mode_params(void)
6260 static unsigned int get_entry(void)
6261 {
6262 char entry_buf[4];
6263- int i, len = 0;
6264+ unsigned int i, len = 0;
6265 int key;
6266 unsigned int v;
6267
6268diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6269index 5b577d5..3c1fed4 100644
6270--- a/arch/x86/crypto/aes-x86_64-asm_64.S
6271+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6272@@ -8,6 +8,8 @@
6273 * including this sentence is retained in full.
6274 */
6275
6276+#include <asm/alternative-asm.h>
6277+
6278 .extern crypto_ft_tab
6279 .extern crypto_it_tab
6280 .extern crypto_fl_tab
6281@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6282 je B192; \
6283 leaq 32(r9),r9;
6284
6285+#define ret pax_force_retaddr 0, 1; ret
6286+
6287 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6288 movq r1,r2; \
6289 movq r3,r4; \
6290diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6291index be6d9e3..21fbbca 100644
6292--- a/arch/x86/crypto/aesni-intel_asm.S
6293+++ b/arch/x86/crypto/aesni-intel_asm.S
6294@@ -31,6 +31,7 @@
6295
6296 #include <linux/linkage.h>
6297 #include <asm/inst.h>
6298+#include <asm/alternative-asm.h>
6299
6300 #ifdef __x86_64__
6301 .data
6302@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6303 pop %r14
6304 pop %r13
6305 pop %r12
6306+ pax_force_retaddr 0, 1
6307 ret
6308+ENDPROC(aesni_gcm_dec)
6309
6310
6311 /*****************************************************************************
6312@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6313 pop %r14
6314 pop %r13
6315 pop %r12
6316+ pax_force_retaddr 0, 1
6317 ret
6318+ENDPROC(aesni_gcm_enc)
6319
6320 #endif
6321
6322@@ -1714,6 +1719,7 @@ _key_expansion_256a:
6323 pxor %xmm1, %xmm0
6324 movaps %xmm0, (TKEYP)
6325 add $0x10, TKEYP
6326+ pax_force_retaddr_bts
6327 ret
6328
6329 .align 4
6330@@ -1738,6 +1744,7 @@ _key_expansion_192a:
6331 shufps $0b01001110, %xmm2, %xmm1
6332 movaps %xmm1, 0x10(TKEYP)
6333 add $0x20, TKEYP
6334+ pax_force_retaddr_bts
6335 ret
6336
6337 .align 4
6338@@ -1757,6 +1764,7 @@ _key_expansion_192b:
6339
6340 movaps %xmm0, (TKEYP)
6341 add $0x10, TKEYP
6342+ pax_force_retaddr_bts
6343 ret
6344
6345 .align 4
6346@@ -1769,6 +1777,7 @@ _key_expansion_256b:
6347 pxor %xmm1, %xmm2
6348 movaps %xmm2, (TKEYP)
6349 add $0x10, TKEYP
6350+ pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6355 #ifndef __x86_64__
6356 popl KEYP
6357 #endif
6358+ pax_force_retaddr 0, 1
6359 ret
6360+ENDPROC(aesni_set_key)
6361
6362 /*
6363 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6364@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6365 popl KLEN
6366 popl KEYP
6367 #endif
6368+ pax_force_retaddr 0, 1
6369 ret
6370+ENDPROC(aesni_enc)
6371
6372 /*
6373 * _aesni_enc1: internal ABI
6374@@ -1959,6 +1972,7 @@ _aesni_enc1:
6375 AESENC KEY STATE
6376 movaps 0x70(TKEYP), KEY
6377 AESENCLAST KEY STATE
6378+ pax_force_retaddr_bts
6379 ret
6380
6381 /*
6382@@ -2067,6 +2081,7 @@ _aesni_enc4:
6383 AESENCLAST KEY STATE2
6384 AESENCLAST KEY STATE3
6385 AESENCLAST KEY STATE4
6386+ pax_force_retaddr_bts
6387 ret
6388
6389 /*
6390@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6391 popl KLEN
6392 popl KEYP
6393 #endif
6394+ pax_force_retaddr 0, 1
6395 ret
6396+ENDPROC(aesni_dec)
6397
6398 /*
6399 * _aesni_dec1: internal ABI
6400@@ -2146,6 +2163,7 @@ _aesni_dec1:
6401 AESDEC KEY STATE
6402 movaps 0x70(TKEYP), KEY
6403 AESDECLAST KEY STATE
6404+ pax_force_retaddr_bts
6405 ret
6406
6407 /*
6408@@ -2254,6 +2272,7 @@ _aesni_dec4:
6409 AESDECLAST KEY STATE2
6410 AESDECLAST KEY STATE3
6411 AESDECLAST KEY STATE4
6412+ pax_force_retaddr_bts
6413 ret
6414
6415 /*
6416@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6417 popl KEYP
6418 popl LEN
6419 #endif
6420+ pax_force_retaddr 0, 1
6421 ret
6422+ENDPROC(aesni_ecb_enc)
6423
6424 /*
6425 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6426@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6427 popl KEYP
6428 popl LEN
6429 #endif
6430+ pax_force_retaddr 0, 1
6431 ret
6432+ENDPROC(aesni_ecb_dec)
6433
6434 /*
6435 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6436@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6437 popl LEN
6438 popl IVP
6439 #endif
6440+ pax_force_retaddr 0, 1
6441 ret
6442+ENDPROC(aesni_cbc_enc)
6443
6444 /*
6445 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6446@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6447 popl LEN
6448 popl IVP
6449 #endif
6450+ pax_force_retaddr 0, 1
6451 ret
6452+ENDPROC(aesni_cbc_dec)
6453
6454 #ifdef __x86_64__
6455 .align 16
6456@@ -2524,6 +2551,7 @@ _aesni_inc_init:
6457 mov $1, TCTR_LOW
6458 MOVQ_R64_XMM TCTR_LOW INC
6459 MOVQ_R64_XMM CTR TCTR_LOW
6460+ pax_force_retaddr_bts
6461 ret
6462
6463 /*
6464@@ -2552,6 +2580,7 @@ _aesni_inc:
6465 .Linc_low:
6466 movaps CTR, IV
6467 PSHUFB_XMM BSWAP_MASK IV
6468+ pax_force_retaddr_bts
6469 ret
6470
6471 /*
6472@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6473 .Lctr_enc_ret:
6474 movups IV, (IVP)
6475 .Lctr_enc_just_ret:
6476+ pax_force_retaddr 0, 1
6477 ret
6478+ENDPROC(aesni_ctr_enc)
6479 #endif
6480diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6481index 6214a9b..1f4fc9a 100644
6482--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6483+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6484@@ -1,3 +1,5 @@
6485+#include <asm/alternative-asm.h>
6486+
6487 # enter ECRYPT_encrypt_bytes
6488 .text
6489 .p2align 5
6490@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6491 add %r11,%rsp
6492 mov %rdi,%rax
6493 mov %rsi,%rdx
6494+ pax_force_retaddr 0, 1
6495 ret
6496 # bytesatleast65:
6497 ._bytesatleast65:
6498@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6499 add %r11,%rsp
6500 mov %rdi,%rax
6501 mov %rsi,%rdx
6502+ pax_force_retaddr
6503 ret
6504 # enter ECRYPT_ivsetup
6505 .text
6506@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6507 add %r11,%rsp
6508 mov %rdi,%rax
6509 mov %rsi,%rdx
6510+ pax_force_retaddr
6511 ret
6512diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6513index 573aa10..b73ad89 100644
6514--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6515+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6516@@ -21,6 +21,7 @@
6517 .text
6518
6519 #include <asm/asm-offsets.h>
6520+#include <asm/alternative-asm.h>
6521
6522 #define a_offset 0
6523 #define b_offset 4
6524@@ -269,6 +270,7 @@ twofish_enc_blk:
6525
6526 popq R1
6527 movq $1,%rax
6528+ pax_force_retaddr 0, 1
6529 ret
6530
6531 twofish_dec_blk:
6532@@ -321,4 +323,5 @@ twofish_dec_blk:
6533
6534 popq R1
6535 movq $1,%rax
6536+ pax_force_retaddr 0, 1
6537 ret
6538diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6539index fd84387..0b4af7d 100644
6540--- a/arch/x86/ia32/ia32_aout.c
6541+++ b/arch/x86/ia32/ia32_aout.c
6542@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6543 unsigned long dump_start, dump_size;
6544 struct user32 dump;
6545
6546+ memset(&dump, 0, sizeof(dump));
6547+
6548 fs = get_fs();
6549 set_fs(KERNEL_DS);
6550 has_dumped = 1;
6551diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6552index 6557769..ef6ae89 100644
6553--- a/arch/x86/ia32/ia32_signal.c
6554+++ b/arch/x86/ia32/ia32_signal.c
6555@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6556 }
6557 seg = get_fs();
6558 set_fs(KERNEL_DS);
6559- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6560+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6561 set_fs(seg);
6562 if (ret >= 0 && uoss_ptr) {
6563 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6564@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6565 */
6566 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6567 size_t frame_size,
6568- void **fpstate)
6569+ void __user **fpstate)
6570 {
6571 unsigned long sp;
6572
6573@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6574
6575 if (used_math()) {
6576 sp = sp - sig_xstate_ia32_size;
6577- *fpstate = (struct _fpstate_ia32 *) sp;
6578+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6579 if (save_i387_xstate_ia32(*fpstate) < 0)
6580 return (void __user *) -1L;
6581 }
6582@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6583 sp -= frame_size;
6584 /* Align the stack pointer according to the i386 ABI,
6585 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6586- sp = ((sp + 4) & -16ul) - 4;
6587+ sp = ((sp - 12) & -16ul) - 4;
6588 return (void __user *) sp;
6589 }
6590
6591@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6592 * These are actually not used anymore, but left because some
6593 * gdb versions depend on them as a marker.
6594 */
6595- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6596+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6597 } put_user_catch(err);
6598
6599 if (err)
6600@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6601 0xb8,
6602 __NR_ia32_rt_sigreturn,
6603 0x80cd,
6604- 0,
6605+ 0
6606 };
6607
6608 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6609@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6610
6611 if (ka->sa.sa_flags & SA_RESTORER)
6612 restorer = ka->sa.sa_restorer;
6613+ else if (current->mm->context.vdso)
6614+ /* Return stub is in 32bit vsyscall page */
6615+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6616 else
6617- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6618- rt_sigreturn);
6619+ restorer = &frame->retcode;
6620 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6621
6622 /*
6623 * Not actually used anymore, but left because some gdb
6624 * versions need it.
6625 */
6626- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6627+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6628 } put_user_catch(err);
6629
6630 if (err)
6631diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6632index 54edb207..9335b5f 100644
6633--- a/arch/x86/ia32/ia32entry.S
6634+++ b/arch/x86/ia32/ia32entry.S
6635@@ -13,7 +13,9 @@
6636 #include <asm/thread_info.h>
6637 #include <asm/segment.h>
6638 #include <asm/irqflags.h>
6639+#include <asm/pgtable.h>
6640 #include <linux/linkage.h>
6641+#include <asm/alternative-asm.h>
6642
6643 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6644 #include <linux/elf-em.h>
6645@@ -95,6 +97,30 @@ ENTRY(native_irq_enable_sysexit)
6646 ENDPROC(native_irq_enable_sysexit)
6647 #endif
6648
6649+ .macro pax_enter_kernel_user
6650+ pax_set_fptr_mask
6651+#ifdef CONFIG_PAX_MEMORY_UDEREF
6652+ call pax_enter_kernel_user
6653+#endif
6654+ .endm
6655+
6656+ .macro pax_exit_kernel_user
6657+#ifdef CONFIG_PAX_MEMORY_UDEREF
6658+ call pax_exit_kernel_user
6659+#endif
6660+#ifdef CONFIG_PAX_RANDKSTACK
6661+ pushq %rax
6662+ call pax_randomize_kstack
6663+ popq %rax
6664+#endif
6665+ .endm
6666+
6667+.macro pax_erase_kstack
6668+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6669+ call pax_erase_kstack
6670+#endif
6671+.endm
6672+
6673 /*
6674 * 32bit SYSENTER instruction entry.
6675 *
6676@@ -121,12 +147,6 @@ ENTRY(ia32_sysenter_target)
6677 CFI_REGISTER rsp,rbp
6678 SWAPGS_UNSAFE_STACK
6679 movq PER_CPU_VAR(kernel_stack), %rsp
6680- addq $(KERNEL_STACK_OFFSET),%rsp
6681- /*
6682- * No need to follow this irqs on/off section: the syscall
6683- * disabled irqs, here we enable it straight after entry:
6684- */
6685- ENABLE_INTERRUPTS(CLBR_NONE)
6686 movl %ebp,%ebp /* zero extension */
6687 pushq_cfi $__USER32_DS
6688 /*CFI_REL_OFFSET ss,0*/
6689@@ -134,25 +154,38 @@ ENTRY(ia32_sysenter_target)
6690 CFI_REL_OFFSET rsp,0
6691 pushfq_cfi
6692 /*CFI_REL_OFFSET rflags,0*/
6693- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6694- CFI_REGISTER rip,r10
6695+ GET_THREAD_INFO(%r11)
6696+ movl TI_sysenter_return(%r11), %r11d
6697+ CFI_REGISTER rip,r11
6698 pushq_cfi $__USER32_CS
6699 /*CFI_REL_OFFSET cs,0*/
6700 movl %eax, %eax
6701- pushq_cfi %r10
6702+ pushq_cfi %r11
6703 CFI_REL_OFFSET rip,0
6704 pushq_cfi %rax
6705 cld
6706 SAVE_ARGS 0,1,0
6707+ pax_enter_kernel_user
6708+ /*
6709+ * No need to follow this irqs on/off section: the syscall
6710+ * disabled irqs, here we enable it straight after entry:
6711+ */
6712+ ENABLE_INTERRUPTS(CLBR_NONE)
6713 /* no need to do an access_ok check here because rbp has been
6714 32bit zero extended */
6715+
6716+#ifdef CONFIG_PAX_MEMORY_UDEREF
6717+ mov $PAX_USER_SHADOW_BASE,%r11
6718+ add %r11,%rbp
6719+#endif
6720+
6721 1: movl (%rbp),%ebp
6722 .section __ex_table,"a"
6723 .quad 1b,ia32_badarg
6724 .previous
6725- GET_THREAD_INFO(%r10)
6726- orl $TS_COMPAT,TI_status(%r10)
6727- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6728+ GET_THREAD_INFO(%r11)
6729+ orl $TS_COMPAT,TI_status(%r11)
6730+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6731 CFI_REMEMBER_STATE
6732 jnz sysenter_tracesys
6733 cmpq $(IA32_NR_syscalls-1),%rax
6734@@ -162,13 +195,15 @@ sysenter_do_call:
6735 sysenter_dispatch:
6736 call *ia32_sys_call_table(,%rax,8)
6737 movq %rax,RAX-ARGOFFSET(%rsp)
6738- GET_THREAD_INFO(%r10)
6739+ GET_THREAD_INFO(%r11)
6740 DISABLE_INTERRUPTS(CLBR_NONE)
6741 TRACE_IRQS_OFF
6742- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6743+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6744 jnz sysexit_audit
6745 sysexit_from_sys_call:
6746- andl $~TS_COMPAT,TI_status(%r10)
6747+ pax_exit_kernel_user
6748+ pax_erase_kstack
6749+ andl $~TS_COMPAT,TI_status(%r11)
6750 /* clear IF, that popfq doesn't enable interrupts early */
6751 andl $~0x200,EFLAGS-R11(%rsp)
6752 movl RIP-R11(%rsp),%edx /* User %eip */
6753@@ -194,6 +229,9 @@ sysexit_from_sys_call:
6754 movl %eax,%esi /* 2nd arg: syscall number */
6755 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6756 call audit_syscall_entry
6757+
6758+ pax_erase_kstack
6759+
6760 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6761 cmpq $(IA32_NR_syscalls-1),%rax
6762 ja ia32_badsys
6763@@ -205,7 +243,7 @@ sysexit_from_sys_call:
6764 .endm
6765
6766 .macro auditsys_exit exit
6767- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6768+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6769 jnz ia32_ret_from_sys_call
6770 TRACE_IRQS_ON
6771 sti
6772@@ -215,12 +253,12 @@ sysexit_from_sys_call:
6773 movzbl %al,%edi /* zero-extend that into %edi */
6774 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6775 call audit_syscall_exit
6776- GET_THREAD_INFO(%r10)
6777+ GET_THREAD_INFO(%r11)
6778 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6779 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6780 cli
6781 TRACE_IRQS_OFF
6782- testl %edi,TI_flags(%r10)
6783+ testl %edi,TI_flags(%r11)
6784 jz \exit
6785 CLEAR_RREGS -ARGOFFSET
6786 jmp int_with_check
6787@@ -238,7 +276,7 @@ sysexit_audit:
6788
6789 sysenter_tracesys:
6790 #ifdef CONFIG_AUDITSYSCALL
6791- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6792+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6793 jz sysenter_auditsys
6794 #endif
6795 SAVE_REST
6796@@ -246,6 +284,9 @@ sysenter_tracesys:
6797 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6798 movq %rsp,%rdi /* &pt_regs -> arg1 */
6799 call syscall_trace_enter
6800+
6801+ pax_erase_kstack
6802+
6803 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6804 RESTORE_REST
6805 cmpq $(IA32_NR_syscalls-1),%rax
6806@@ -277,19 +318,20 @@ ENDPROC(ia32_sysenter_target)
6807 ENTRY(ia32_cstar_target)
6808 CFI_STARTPROC32 simple
6809 CFI_SIGNAL_FRAME
6810- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6811+ CFI_DEF_CFA rsp,0
6812 CFI_REGISTER rip,rcx
6813 /*CFI_REGISTER rflags,r11*/
6814 SWAPGS_UNSAFE_STACK
6815 movl %esp,%r8d
6816 CFI_REGISTER rsp,r8
6817 movq PER_CPU_VAR(kernel_stack),%rsp
6818+ SAVE_ARGS 8*6,0,0
6819+ pax_enter_kernel_user
6820 /*
6821 * No need to follow this irqs on/off section: the syscall
6822 * disabled irqs and here we enable it straight after entry:
6823 */
6824 ENABLE_INTERRUPTS(CLBR_NONE)
6825- SAVE_ARGS 8,0,0
6826 movl %eax,%eax /* zero extension */
6827 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6828 movq %rcx,RIP-ARGOFFSET(%rsp)
6829@@ -305,13 +347,19 @@ ENTRY(ia32_cstar_target)
6830 /* no need to do an access_ok check here because r8 has been
6831 32bit zero extended */
6832 /* hardware stack frame is complete now */
6833+
6834+#ifdef CONFIG_PAX_MEMORY_UDEREF
6835+ mov $PAX_USER_SHADOW_BASE,%r11
6836+ add %r11,%r8
6837+#endif
6838+
6839 1: movl (%r8),%r9d
6840 .section __ex_table,"a"
6841 .quad 1b,ia32_badarg
6842 .previous
6843- GET_THREAD_INFO(%r10)
6844- orl $TS_COMPAT,TI_status(%r10)
6845- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6846+ GET_THREAD_INFO(%r11)
6847+ orl $TS_COMPAT,TI_status(%r11)
6848+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6849 CFI_REMEMBER_STATE
6850 jnz cstar_tracesys
6851 cmpq $IA32_NR_syscalls-1,%rax
6852@@ -321,13 +369,15 @@ cstar_do_call:
6853 cstar_dispatch:
6854 call *ia32_sys_call_table(,%rax,8)
6855 movq %rax,RAX-ARGOFFSET(%rsp)
6856- GET_THREAD_INFO(%r10)
6857+ GET_THREAD_INFO(%r11)
6858 DISABLE_INTERRUPTS(CLBR_NONE)
6859 TRACE_IRQS_OFF
6860- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6861+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6862 jnz sysretl_audit
6863 sysretl_from_sys_call:
6864- andl $~TS_COMPAT,TI_status(%r10)
6865+ pax_exit_kernel_user
6866+ pax_erase_kstack
6867+ andl $~TS_COMPAT,TI_status(%r11)
6868 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6869 movl RIP-ARGOFFSET(%rsp),%ecx
6870 CFI_REGISTER rip,rcx
6871@@ -355,7 +405,7 @@ sysretl_audit:
6872
6873 cstar_tracesys:
6874 #ifdef CONFIG_AUDITSYSCALL
6875- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6876+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6877 jz cstar_auditsys
6878 #endif
6879 xchgl %r9d,%ebp
6880@@ -364,6 +414,9 @@ cstar_tracesys:
6881 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6882 movq %rsp,%rdi /* &pt_regs -> arg1 */
6883 call syscall_trace_enter
6884+
6885+ pax_erase_kstack
6886+
6887 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6888 RESTORE_REST
6889 xchgl %ebp,%r9d
6890@@ -409,20 +462,21 @@ ENTRY(ia32_syscall)
6891 CFI_REL_OFFSET rip,RIP-RIP
6892 PARAVIRT_ADJUST_EXCEPTION_FRAME
6893 SWAPGS
6894- /*
6895- * No need to follow this irqs on/off section: the syscall
6896- * disabled irqs and here we enable it straight after entry:
6897- */
6898- ENABLE_INTERRUPTS(CLBR_NONE)
6899 movl %eax,%eax
6900 pushq_cfi %rax
6901 cld
6902 /* note the registers are not zero extended to the sf.
6903 this could be a problem. */
6904 SAVE_ARGS 0,1,0
6905- GET_THREAD_INFO(%r10)
6906- orl $TS_COMPAT,TI_status(%r10)
6907- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6908+ pax_enter_kernel_user
6909+ /*
6910+ * No need to follow this irqs on/off section: the syscall
6911+ * disabled irqs and here we enable it straight after entry:
6912+ */
6913+ ENABLE_INTERRUPTS(CLBR_NONE)
6914+ GET_THREAD_INFO(%r11)
6915+ orl $TS_COMPAT,TI_status(%r11)
6916+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6917 jnz ia32_tracesys
6918 cmpq $(IA32_NR_syscalls-1),%rax
6919 ja ia32_badsys
6920@@ -441,6 +495,9 @@ ia32_tracesys:
6921 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6922 movq %rsp,%rdi /* &pt_regs -> arg1 */
6923 call syscall_trace_enter
6924+
6925+ pax_erase_kstack
6926+
6927 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6928 RESTORE_REST
6929 cmpq $(IA32_NR_syscalls-1),%rax
6930@@ -455,6 +512,7 @@ ia32_badsys:
6931
6932 quiet_ni_syscall:
6933 movq $-ENOSYS,%rax
6934+ pax_force_retaddr
6935 ret
6936 CFI_ENDPROC
6937
6938diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6939index f6f5c53..b358b28 100644
6940--- a/arch/x86/ia32/sys_ia32.c
6941+++ b/arch/x86/ia32/sys_ia32.c
6942@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6943 */
6944 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6945 {
6946- typeof(ubuf->st_uid) uid = 0;
6947- typeof(ubuf->st_gid) gid = 0;
6948+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
6949+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
6950 SET_UID(uid, stat->uid);
6951 SET_GID(gid, stat->gid);
6952 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
6953@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
6954 }
6955 set_fs(KERNEL_DS);
6956 ret = sys_rt_sigprocmask(how,
6957- set ? (sigset_t __user *)&s : NULL,
6958- oset ? (sigset_t __user *)&s : NULL,
6959+ set ? (sigset_t __force_user *)&s : NULL,
6960+ oset ? (sigset_t __force_user *)&s : NULL,
6961 sigsetsize);
6962 set_fs(old_fs);
6963 if (ret)
6964@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
6965 return alarm_setitimer(seconds);
6966 }
6967
6968-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
6969+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
6970 int options)
6971 {
6972 return compat_sys_wait4(pid, stat_addr, options, NULL);
6973@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
6974 mm_segment_t old_fs = get_fs();
6975
6976 set_fs(KERNEL_DS);
6977- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
6978+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
6979 set_fs(old_fs);
6980 if (put_compat_timespec(&t, interval))
6981 return -EFAULT;
6982@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
6983 mm_segment_t old_fs = get_fs();
6984
6985 set_fs(KERNEL_DS);
6986- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6987+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6988 set_fs(old_fs);
6989 if (!ret) {
6990 switch (_NSIG_WORDS) {
6991@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
6992 if (copy_siginfo_from_user32(&info, uinfo))
6993 return -EFAULT;
6994 set_fs(KERNEL_DS);
6995- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6996+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6997 set_fs(old_fs);
6998 return ret;
6999 }
7000@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7001 return -EFAULT;
7002
7003 set_fs(KERNEL_DS);
7004- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7005+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7006 count);
7007 set_fs(old_fs);
7008
7009diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7010index 091508b..0ee32ec 100644
7011--- a/arch/x86/include/asm/alternative-asm.h
7012+++ b/arch/x86/include/asm/alternative-asm.h
7013@@ -15,6 +15,45 @@
7014 .endm
7015 #endif
7016
7017+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7018+ .macro pax_force_retaddr_bts rip=0
7019+ btsq $63,\rip(%rsp)
7020+ .endm
7021+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7022+ .macro pax_force_retaddr rip=0, reload=0
7023+ btsq $63,\rip(%rsp)
7024+ .endm
7025+ .macro pax_force_fptr ptr
7026+ btsq $63,\ptr
7027+ .endm
7028+ .macro pax_set_fptr_mask
7029+ .endm
7030+#endif
7031+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7032+ .macro pax_force_retaddr rip=0, reload=0
7033+ .if \reload
7034+ pax_set_fptr_mask
7035+ .endif
7036+ orq %r10,\rip(%rsp)
7037+ .endm
7038+ .macro pax_force_fptr ptr
7039+ orq %r10,\ptr
7040+ .endm
7041+ .macro pax_set_fptr_mask
7042+ movabs $0x8000000000000000,%r10
7043+ .endm
7044+#endif
7045+#else
7046+ .macro pax_force_retaddr rip=0, reload=0
7047+ .endm
7048+ .macro pax_force_fptr ptr
7049+ .endm
7050+ .macro pax_force_retaddr_bts rip=0
7051+ .endm
7052+ .macro pax_set_fptr_mask
7053+ .endm
7054+#endif
7055+
7056 .macro altinstruction_entry orig alt feature orig_len alt_len
7057 .long \orig - .
7058 .long \alt - .
7059diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7060index 37ad100..7d47faa 100644
7061--- a/arch/x86/include/asm/alternative.h
7062+++ b/arch/x86/include/asm/alternative.h
7063@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7064 ".section .discard,\"aw\",@progbits\n" \
7065 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7066 ".previous\n" \
7067- ".section .altinstr_replacement, \"ax\"\n" \
7068+ ".section .altinstr_replacement, \"a\"\n" \
7069 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7070 ".previous"
7071
7072diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7073index 9b7273c..e9fcc24 100644
7074--- a/arch/x86/include/asm/apic.h
7075+++ b/arch/x86/include/asm/apic.h
7076@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7077
7078 #ifdef CONFIG_X86_LOCAL_APIC
7079
7080-extern unsigned int apic_verbosity;
7081+extern int apic_verbosity;
7082 extern int local_apic_timer_c2_ok;
7083
7084 extern int disable_apic;
7085diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7086index 20370c6..a2eb9b0 100644
7087--- a/arch/x86/include/asm/apm.h
7088+++ b/arch/x86/include/asm/apm.h
7089@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7090 __asm__ __volatile__(APM_DO_ZERO_SEGS
7091 "pushl %%edi\n\t"
7092 "pushl %%ebp\n\t"
7093- "lcall *%%cs:apm_bios_entry\n\t"
7094+ "lcall *%%ss:apm_bios_entry\n\t"
7095 "setc %%al\n\t"
7096 "popl %%ebp\n\t"
7097 "popl %%edi\n\t"
7098@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7099 __asm__ __volatile__(APM_DO_ZERO_SEGS
7100 "pushl %%edi\n\t"
7101 "pushl %%ebp\n\t"
7102- "lcall *%%cs:apm_bios_entry\n\t"
7103+ "lcall *%%ss:apm_bios_entry\n\t"
7104 "setc %%bl\n\t"
7105 "popl %%ebp\n\t"
7106 "popl %%edi\n\t"
7107diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7108index 10572e3..2618d91 100644
7109--- a/arch/x86/include/asm/atomic.h
7110+++ b/arch/x86/include/asm/atomic.h
7111@@ -22,7 +22,18 @@
7112 */
7113 static inline int atomic_read(const atomic_t *v)
7114 {
7115- return (*(volatile int *)&(v)->counter);
7116+ return (*(volatile const int *)&(v)->counter);
7117+}
7118+
7119+/**
7120+ * atomic_read_unchecked - read atomic variable
7121+ * @v: pointer of type atomic_unchecked_t
7122+ *
7123+ * Atomically reads the value of @v.
7124+ */
7125+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7126+{
7127+ return (*(volatile const int *)&(v)->counter);
7128 }
7129
7130 /**
7131@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7132 }
7133
7134 /**
7135+ * atomic_set_unchecked - set atomic variable
7136+ * @v: pointer of type atomic_unchecked_t
7137+ * @i: required value
7138+ *
7139+ * Atomically sets the value of @v to @i.
7140+ */
7141+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7142+{
7143+ v->counter = i;
7144+}
7145+
7146+/**
7147 * atomic_add - add integer to atomic variable
7148 * @i: integer value to add
7149 * @v: pointer of type atomic_t
7150@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7151 */
7152 static inline void atomic_add(int i, atomic_t *v)
7153 {
7154- asm volatile(LOCK_PREFIX "addl %1,%0"
7155+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7156+
7157+#ifdef CONFIG_PAX_REFCOUNT
7158+ "jno 0f\n"
7159+ LOCK_PREFIX "subl %1,%0\n"
7160+ "int $4\n0:\n"
7161+ _ASM_EXTABLE(0b, 0b)
7162+#endif
7163+
7164+ : "+m" (v->counter)
7165+ : "ir" (i));
7166+}
7167+
7168+/**
7169+ * atomic_add_unchecked - add integer to atomic variable
7170+ * @i: integer value to add
7171+ * @v: pointer of type atomic_unchecked_t
7172+ *
7173+ * Atomically adds @i to @v.
7174+ */
7175+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7176+{
7177+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7178 : "+m" (v->counter)
7179 : "ir" (i));
7180 }
7181@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7182 */
7183 static inline void atomic_sub(int i, atomic_t *v)
7184 {
7185- asm volatile(LOCK_PREFIX "subl %1,%0"
7186+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7187+
7188+#ifdef CONFIG_PAX_REFCOUNT
7189+ "jno 0f\n"
7190+ LOCK_PREFIX "addl %1,%0\n"
7191+ "int $4\n0:\n"
7192+ _ASM_EXTABLE(0b, 0b)
7193+#endif
7194+
7195+ : "+m" (v->counter)
7196+ : "ir" (i));
7197+}
7198+
7199+/**
7200+ * atomic_sub_unchecked - subtract integer from atomic variable
7201+ * @i: integer value to subtract
7202+ * @v: pointer of type atomic_unchecked_t
7203+ *
7204+ * Atomically subtracts @i from @v.
7205+ */
7206+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7207+{
7208+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7209 : "+m" (v->counter)
7210 : "ir" (i));
7211 }
7212@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7213 {
7214 unsigned char c;
7215
7216- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7217+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7218+
7219+#ifdef CONFIG_PAX_REFCOUNT
7220+ "jno 0f\n"
7221+ LOCK_PREFIX "addl %2,%0\n"
7222+ "int $4\n0:\n"
7223+ _ASM_EXTABLE(0b, 0b)
7224+#endif
7225+
7226+ "sete %1\n"
7227 : "+m" (v->counter), "=qm" (c)
7228 : "ir" (i) : "memory");
7229 return c;
7230@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7231 */
7232 static inline void atomic_inc(atomic_t *v)
7233 {
7234- asm volatile(LOCK_PREFIX "incl %0"
7235+ asm volatile(LOCK_PREFIX "incl %0\n"
7236+
7237+#ifdef CONFIG_PAX_REFCOUNT
7238+ "jno 0f\n"
7239+ LOCK_PREFIX "decl %0\n"
7240+ "int $4\n0:\n"
7241+ _ASM_EXTABLE(0b, 0b)
7242+#endif
7243+
7244+ : "+m" (v->counter));
7245+}
7246+
7247+/**
7248+ * atomic_inc_unchecked - increment atomic variable
7249+ * @v: pointer of type atomic_unchecked_t
7250+ *
7251+ * Atomically increments @v by 1.
7252+ */
7253+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7254+{
7255+ asm volatile(LOCK_PREFIX "incl %0\n"
7256 : "+m" (v->counter));
7257 }
7258
7259@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7260 */
7261 static inline void atomic_dec(atomic_t *v)
7262 {
7263- asm volatile(LOCK_PREFIX "decl %0"
7264+ asm volatile(LOCK_PREFIX "decl %0\n"
7265+
7266+#ifdef CONFIG_PAX_REFCOUNT
7267+ "jno 0f\n"
7268+ LOCK_PREFIX "incl %0\n"
7269+ "int $4\n0:\n"
7270+ _ASM_EXTABLE(0b, 0b)
7271+#endif
7272+
7273+ : "+m" (v->counter));
7274+}
7275+
7276+/**
7277+ * atomic_dec_unchecked - decrement atomic variable
7278+ * @v: pointer of type atomic_unchecked_t
7279+ *
7280+ * Atomically decrements @v by 1.
7281+ */
7282+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7283+{
7284+ asm volatile(LOCK_PREFIX "decl %0\n"
7285 : "+m" (v->counter));
7286 }
7287
7288@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7289 {
7290 unsigned char c;
7291
7292- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7293+ asm volatile(LOCK_PREFIX "decl %0\n"
7294+
7295+#ifdef CONFIG_PAX_REFCOUNT
7296+ "jno 0f\n"
7297+ LOCK_PREFIX "incl %0\n"
7298+ "int $4\n0:\n"
7299+ _ASM_EXTABLE(0b, 0b)
7300+#endif
7301+
7302+ "sete %1\n"
7303 : "+m" (v->counter), "=qm" (c)
7304 : : "memory");
7305 return c != 0;
7306@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7307 {
7308 unsigned char c;
7309
7310- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7311+ asm volatile(LOCK_PREFIX "incl %0\n"
7312+
7313+#ifdef CONFIG_PAX_REFCOUNT
7314+ "jno 0f\n"
7315+ LOCK_PREFIX "decl %0\n"
7316+ "int $4\n0:\n"
7317+ _ASM_EXTABLE(0b, 0b)
7318+#endif
7319+
7320+ "sete %1\n"
7321+ : "+m" (v->counter), "=qm" (c)
7322+ : : "memory");
7323+ return c != 0;
7324+}
7325+
7326+/**
7327+ * atomic_inc_and_test_unchecked - increment and test
7328+ * @v: pointer of type atomic_unchecked_t
7329+ *
7330+ * Atomically increments @v by 1
7331+ * and returns true if the result is zero, or false for all
7332+ * other cases.
7333+ */
7334+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7335+{
7336+ unsigned char c;
7337+
7338+ asm volatile(LOCK_PREFIX "incl %0\n"
7339+ "sete %1\n"
7340 : "+m" (v->counter), "=qm" (c)
7341 : : "memory");
7342 return c != 0;
7343@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7344 {
7345 unsigned char c;
7346
7347- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7348+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7349+
7350+#ifdef CONFIG_PAX_REFCOUNT
7351+ "jno 0f\n"
7352+ LOCK_PREFIX "subl %2,%0\n"
7353+ "int $4\n0:\n"
7354+ _ASM_EXTABLE(0b, 0b)
7355+#endif
7356+
7357+ "sets %1\n"
7358 : "+m" (v->counter), "=qm" (c)
7359 : "ir" (i) : "memory");
7360 return c;
7361@@ -180,6 +342,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
7362 #endif
7363 /* Modern 486+ processor */
7364 __i = i;
7365+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7366+
7367+#ifdef CONFIG_PAX_REFCOUNT
7368+ "jno 0f\n"
7369+ "movl %0, %1\n"
7370+ "int $4\n0:\n"
7371+ _ASM_EXTABLE(0b, 0b)
7372+#endif
7373+
7374+ : "+r" (i), "+m" (v->counter)
7375+ : : "memory");
7376+ return i + __i;
7377+
7378+#ifdef CONFIG_M386
7379+no_xadd: /* Legacy 386 processor */
7380+ local_irq_save(flags);
7381+ __i = atomic_read(v);
7382+ atomic_set(v, i + __i);
7383+ local_irq_restore(flags);
7384+ return i + __i;
7385+#endif
7386+}
7387+
7388+/**
7389+ * atomic_add_return_unchecked - add integer and return
7390+ * @v: pointer of type atomic_unchecked_t
7391+ * @i: integer value to add
7392+ *
7393+ * Atomically adds @i to @v and returns @i + @v
7394+ */
7395+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7396+{
7397+ int __i;
7398+#ifdef CONFIG_M386
7399+ unsigned long flags;
7400+ if (unlikely(boot_cpu_data.x86 <= 3))
7401+ goto no_xadd;
7402+#endif
7403+ /* Modern 486+ processor */
7404+ __i = i;
7405 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7406 : "+r" (i), "+m" (v->counter)
7407 : : "memory");
7408@@ -208,6 +410,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7409 }
7410
7411 #define atomic_inc_return(v) (atomic_add_return(1, v))
7412+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7413+{
7414+ return atomic_add_return_unchecked(1, v);
7415+}
7416 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7417
7418 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7419@@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7420 return cmpxchg(&v->counter, old, new);
7421 }
7422
7423+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7424+{
7425+ return cmpxchg(&v->counter, old, new);
7426+}
7427+
7428 static inline int atomic_xchg(atomic_t *v, int new)
7429 {
7430 return xchg(&v->counter, new);
7431 }
7432
7433+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7434+{
7435+ return xchg(&v->counter, new);
7436+}
7437+
7438 /**
7439 * __atomic_add_unless - add unless the number is already a given value
7440 * @v: pointer of type atomic_t
7441@@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7442 */
7443 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7444 {
7445- int c, old;
7446+ int c, old, new;
7447 c = atomic_read(v);
7448 for (;;) {
7449- if (unlikely(c == (u)))
7450+ if (unlikely(c == u))
7451 break;
7452- old = atomic_cmpxchg((v), c, c + (a));
7453+
7454+ asm volatile("addl %2,%0\n"
7455+
7456+#ifdef CONFIG_PAX_REFCOUNT
7457+ "jno 0f\n"
7458+ "subl %2,%0\n"
7459+ "int $4\n0:\n"
7460+ _ASM_EXTABLE(0b, 0b)
7461+#endif
7462+
7463+ : "=r" (new)
7464+ : "0" (c), "ir" (a));
7465+
7466+ old = atomic_cmpxchg(v, c, new);
7467 if (likely(old == c))
7468 break;
7469 c = old;
7470@@ -244,6 +473,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7471 return c;
7472 }
7473
7474+/**
7475+ * atomic_inc_not_zero_hint - increment if not null
7476+ * @v: pointer of type atomic_t
7477+ * @hint: probable value of the atomic before the increment
7478+ *
7479+ * This version of atomic_inc_not_zero() gives a hint of probable
7480+ * value of the atomic. This helps processor to not read the memory
7481+ * before doing the atomic read/modify/write cycle, lowering
7482+ * number of bus transactions on some arches.
7483+ *
7484+ * Returns: 0 if increment was not done, 1 otherwise.
7485+ */
7486+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7487+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7488+{
7489+ int val, c = hint, new;
7490+
7491+ /* sanity test, should be removed by compiler if hint is a constant */
7492+ if (!hint)
7493+ return __atomic_add_unless(v, 1, 0);
7494+
7495+ do {
7496+ asm volatile("incl %0\n"
7497+
7498+#ifdef CONFIG_PAX_REFCOUNT
7499+ "jno 0f\n"
7500+ "decl %0\n"
7501+ "int $4\n0:\n"
7502+ _ASM_EXTABLE(0b, 0b)
7503+#endif
7504+
7505+ : "=r" (new)
7506+ : "0" (c));
7507+
7508+ val = atomic_cmpxchg(v, c, new);
7509+ if (val == c)
7510+ return 1;
7511+ c = val;
7512+ } while (c);
7513+
7514+ return 0;
7515+}
7516
7517 /*
7518 * atomic_dec_if_positive - decrement by 1 if old value positive
7519diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7520index 24098aa..1e37723 100644
7521--- a/arch/x86/include/asm/atomic64_32.h
7522+++ b/arch/x86/include/asm/atomic64_32.h
7523@@ -12,6 +12,14 @@ typedef struct {
7524 u64 __aligned(8) counter;
7525 } atomic64_t;
7526
7527+#ifdef CONFIG_PAX_REFCOUNT
7528+typedef struct {
7529+ u64 __aligned(8) counter;
7530+} atomic64_unchecked_t;
7531+#else
7532+typedef atomic64_t atomic64_unchecked_t;
7533+#endif
7534+
7535 #define ATOMIC64_INIT(val) { (val) }
7536
7537 #ifdef CONFIG_X86_CMPXCHG64
7538@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7539 }
7540
7541 /**
7542+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7543+ * @p: pointer to type atomic64_unchecked_t
7544+ * @o: expected value
7545+ * @n: new value
7546+ *
7547+ * Atomically sets @v to @n if it was equal to @o and returns
7548+ * the old value.
7549+ */
7550+
7551+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7552+{
7553+ return cmpxchg64(&v->counter, o, n);
7554+}
7555+
7556+/**
7557 * atomic64_xchg - xchg atomic64 variable
7558 * @v: pointer to type atomic64_t
7559 * @n: value to assign
7560@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7561 }
7562
7563 /**
7564+ * atomic64_set_unchecked - set atomic64 variable
7565+ * @v: pointer to type atomic64_unchecked_t
7566+ * @n: value to assign
7567+ *
7568+ * Atomically sets the value of @v to @n.
7569+ */
7570+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7571+{
7572+ unsigned high = (unsigned)(i >> 32);
7573+ unsigned low = (unsigned)i;
7574+ asm volatile(ATOMIC64_ALTERNATIVE(set)
7575+ : "+b" (low), "+c" (high)
7576+ : "S" (v)
7577+ : "eax", "edx", "memory"
7578+ );
7579+}
7580+
7581+/**
7582 * atomic64_read - read atomic64 variable
7583 * @v: pointer to type atomic64_t
7584 *
7585@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7586 }
7587
7588 /**
7589+ * atomic64_read_unchecked - read atomic64 variable
7590+ * @v: pointer to type atomic64_unchecked_t
7591+ *
7592+ * Atomically reads the value of @v and returns it.
7593+ */
7594+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7595+{
7596+ long long r;
7597+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7598+ : "=A" (r), "+c" (v)
7599+ : : "memory"
7600+ );
7601+ return r;
7602+ }
7603+
7604+/**
7605 * atomic64_add_return - add and return
7606 * @i: integer value to add
7607 * @v: pointer to type atomic64_t
7608@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7609 return i;
7610 }
7611
7612+/**
7613+ * atomic64_add_return_unchecked - add and return
7614+ * @i: integer value to add
7615+ * @v: pointer to type atomic64_unchecked_t
7616+ *
7617+ * Atomically adds @i to @v and returns @i + *@v
7618+ */
7619+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7620+{
7621+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7622+ : "+A" (i), "+c" (v)
7623+ : : "memory"
7624+ );
7625+ return i;
7626+}
7627+
7628 /*
7629 * Other variants with different arithmetic operators:
7630 */
7631@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7632 return a;
7633 }
7634
7635+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7636+{
7637+ long long a;
7638+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7639+ : "=A" (a)
7640+ : "S" (v)
7641+ : "memory", "ecx"
7642+ );
7643+ return a;
7644+}
7645+
7646 static inline long long atomic64_dec_return(atomic64_t *v)
7647 {
7648 long long a;
7649@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7650 }
7651
7652 /**
7653+ * atomic64_add_unchecked - add integer to atomic64 variable
7654+ * @i: integer value to add
7655+ * @v: pointer to type atomic64_unchecked_t
7656+ *
7657+ * Atomically adds @i to @v.
7658+ */
7659+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7660+{
7661+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7662+ : "+A" (i), "+c" (v)
7663+ : : "memory"
7664+ );
7665+ return i;
7666+}
7667+
7668+/**
7669 * atomic64_sub - subtract the atomic64 variable
7670 * @i: integer value to subtract
7671 * @v: pointer to type atomic64_t
7672diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7673index 017594d..d3fcf72 100644
7674--- a/arch/x86/include/asm/atomic64_64.h
7675+++ b/arch/x86/include/asm/atomic64_64.h
7676@@ -18,7 +18,19 @@
7677 */
7678 static inline long atomic64_read(const atomic64_t *v)
7679 {
7680- return (*(volatile long *)&(v)->counter);
7681+ return (*(volatile const long *)&(v)->counter);
7682+}
7683+
7684+/**
7685+ * atomic64_read_unchecked - read atomic64 variable
7686+ * @v: pointer of type atomic64_unchecked_t
7687+ *
7688+ * Atomically reads the value of @v.
7689+ * Doesn't imply a read memory barrier.
7690+ */
7691+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7692+{
7693+ return (*(volatile const long *)&(v)->counter);
7694 }
7695
7696 /**
7697@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7698 }
7699
7700 /**
7701+ * atomic64_set_unchecked - set atomic64 variable
7702+ * @v: pointer to type atomic64_unchecked_t
7703+ * @i: required value
7704+ *
7705+ * Atomically sets the value of @v to @i.
7706+ */
7707+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7708+{
7709+ v->counter = i;
7710+}
7711+
7712+/**
7713 * atomic64_add - add integer to atomic64 variable
7714 * @i: integer value to add
7715 * @v: pointer to type atomic64_t
7716@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7717 */
7718 static inline void atomic64_add(long i, atomic64_t *v)
7719 {
7720+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7721+
7722+#ifdef CONFIG_PAX_REFCOUNT
7723+ "jno 0f\n"
7724+ LOCK_PREFIX "subq %1,%0\n"
7725+ "int $4\n0:\n"
7726+ _ASM_EXTABLE(0b, 0b)
7727+#endif
7728+
7729+ : "=m" (v->counter)
7730+ : "er" (i), "m" (v->counter));
7731+}
7732+
7733+/**
7734+ * atomic64_add_unchecked - add integer to atomic64 variable
7735+ * @i: integer value to add
7736+ * @v: pointer to type atomic64_unchecked_t
7737+ *
7738+ * Atomically adds @i to @v.
7739+ */
7740+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7741+{
7742 asm volatile(LOCK_PREFIX "addq %1,%0"
7743 : "=m" (v->counter)
7744 : "er" (i), "m" (v->counter));
7745@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7746 */
7747 static inline void atomic64_sub(long i, atomic64_t *v)
7748 {
7749- asm volatile(LOCK_PREFIX "subq %1,%0"
7750+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7751+
7752+#ifdef CONFIG_PAX_REFCOUNT
7753+ "jno 0f\n"
7754+ LOCK_PREFIX "addq %1,%0\n"
7755+ "int $4\n0:\n"
7756+ _ASM_EXTABLE(0b, 0b)
7757+#endif
7758+
7759+ : "=m" (v->counter)
7760+ : "er" (i), "m" (v->counter));
7761+}
7762+
7763+/**
7764+ * atomic64_sub_unchecked - subtract the atomic64 variable
7765+ * @i: integer value to subtract
7766+ * @v: pointer to type atomic64_unchecked_t
7767+ *
7768+ * Atomically subtracts @i from @v.
7769+ */
7770+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7771+{
7772+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7773 : "=m" (v->counter)
7774 : "er" (i), "m" (v->counter));
7775 }
7776@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7777 {
7778 unsigned char c;
7779
7780- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7781+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7782+
7783+#ifdef CONFIG_PAX_REFCOUNT
7784+ "jno 0f\n"
7785+ LOCK_PREFIX "addq %2,%0\n"
7786+ "int $4\n0:\n"
7787+ _ASM_EXTABLE(0b, 0b)
7788+#endif
7789+
7790+ "sete %1\n"
7791 : "=m" (v->counter), "=qm" (c)
7792 : "er" (i), "m" (v->counter) : "memory");
7793 return c;
7794@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7795 */
7796 static inline void atomic64_inc(atomic64_t *v)
7797 {
7798+ asm volatile(LOCK_PREFIX "incq %0\n"
7799+
7800+#ifdef CONFIG_PAX_REFCOUNT
7801+ "jno 0f\n"
7802+ LOCK_PREFIX "decq %0\n"
7803+ "int $4\n0:\n"
7804+ _ASM_EXTABLE(0b, 0b)
7805+#endif
7806+
7807+ : "=m" (v->counter)
7808+ : "m" (v->counter));
7809+}
7810+
7811+/**
7812+ * atomic64_inc_unchecked - increment atomic64 variable
7813+ * @v: pointer to type atomic64_unchecked_t
7814+ *
7815+ * Atomically increments @v by 1.
7816+ */
7817+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7818+{
7819 asm volatile(LOCK_PREFIX "incq %0"
7820 : "=m" (v->counter)
7821 : "m" (v->counter));
7822@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7823 */
7824 static inline void atomic64_dec(atomic64_t *v)
7825 {
7826- asm volatile(LOCK_PREFIX "decq %0"
7827+ asm volatile(LOCK_PREFIX "decq %0\n"
7828+
7829+#ifdef CONFIG_PAX_REFCOUNT
7830+ "jno 0f\n"
7831+ LOCK_PREFIX "incq %0\n"
7832+ "int $4\n0:\n"
7833+ _ASM_EXTABLE(0b, 0b)
7834+#endif
7835+
7836+ : "=m" (v->counter)
7837+ : "m" (v->counter));
7838+}
7839+
7840+/**
7841+ * atomic64_dec_unchecked - decrement atomic64 variable
7842+ * @v: pointer to type atomic64_t
7843+ *
7844+ * Atomically decrements @v by 1.
7845+ */
7846+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7847+{
7848+ asm volatile(LOCK_PREFIX "decq %0\n"
7849 : "=m" (v->counter)
7850 : "m" (v->counter));
7851 }
7852@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7853 {
7854 unsigned char c;
7855
7856- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7857+ asm volatile(LOCK_PREFIX "decq %0\n"
7858+
7859+#ifdef CONFIG_PAX_REFCOUNT
7860+ "jno 0f\n"
7861+ LOCK_PREFIX "incq %0\n"
7862+ "int $4\n0:\n"
7863+ _ASM_EXTABLE(0b, 0b)
7864+#endif
7865+
7866+ "sete %1\n"
7867 : "=m" (v->counter), "=qm" (c)
7868 : "m" (v->counter) : "memory");
7869 return c != 0;
7870@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7871 {
7872 unsigned char c;
7873
7874- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7875+ asm volatile(LOCK_PREFIX "incq %0\n"
7876+
7877+#ifdef CONFIG_PAX_REFCOUNT
7878+ "jno 0f\n"
7879+ LOCK_PREFIX "decq %0\n"
7880+ "int $4\n0:\n"
7881+ _ASM_EXTABLE(0b, 0b)
7882+#endif
7883+
7884+ "sete %1\n"
7885 : "=m" (v->counter), "=qm" (c)
7886 : "m" (v->counter) : "memory");
7887 return c != 0;
7888@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7889 {
7890 unsigned char c;
7891
7892- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7893+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7894+
7895+#ifdef CONFIG_PAX_REFCOUNT
7896+ "jno 0f\n"
7897+ LOCK_PREFIX "subq %2,%0\n"
7898+ "int $4\n0:\n"
7899+ _ASM_EXTABLE(0b, 0b)
7900+#endif
7901+
7902+ "sets %1\n"
7903 : "=m" (v->counter), "=qm" (c)
7904 : "er" (i), "m" (v->counter) : "memory");
7905 return c;
7906@@ -171,7 +317,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7907 static inline long atomic64_add_return(long i, atomic64_t *v)
7908 {
7909 long __i = i;
7910- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7911+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7912+
7913+#ifdef CONFIG_PAX_REFCOUNT
7914+ "jno 0f\n"
7915+ "movq %0, %1\n"
7916+ "int $4\n0:\n"
7917+ _ASM_EXTABLE(0b, 0b)
7918+#endif
7919+
7920+ : "+r" (i), "+m" (v->counter)
7921+ : : "memory");
7922+ return i + __i;
7923+}
7924+
7925+/**
7926+ * atomic64_add_return_unchecked - add and return
7927+ * @i: integer value to add
7928+ * @v: pointer to type atomic64_unchecked_t
7929+ *
7930+ * Atomically adds @i to @v and returns @i + @v
7931+ */
7932+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7933+{
7934+ long __i = i;
7935+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
7936 : "+r" (i), "+m" (v->counter)
7937 : : "memory");
7938 return i + __i;
7939@@ -183,6 +353,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7940 }
7941
7942 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7943+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7944+{
7945+ return atomic64_add_return_unchecked(1, v);
7946+}
7947 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7948
7949 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7950@@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7951 return cmpxchg(&v->counter, old, new);
7952 }
7953
7954+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7955+{
7956+ return cmpxchg(&v->counter, old, new);
7957+}
7958+
7959 static inline long atomic64_xchg(atomic64_t *v, long new)
7960 {
7961 return xchg(&v->counter, new);
7962@@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
7963 */
7964 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7965 {
7966- long c, old;
7967+ long c, old, new;
7968 c = atomic64_read(v);
7969 for (;;) {
7970- if (unlikely(c == (u)))
7971+ if (unlikely(c == u))
7972 break;
7973- old = atomic64_cmpxchg((v), c, c + (a));
7974+
7975+ asm volatile("add %2,%0\n"
7976+
7977+#ifdef CONFIG_PAX_REFCOUNT
7978+ "jno 0f\n"
7979+ "sub %2,%0\n"
7980+ "int $4\n0:\n"
7981+ _ASM_EXTABLE(0b, 0b)
7982+#endif
7983+
7984+ : "=r" (new)
7985+ : "0" (c), "ir" (a));
7986+
7987+ old = atomic64_cmpxchg(v, c, new);
7988 if (likely(old == c))
7989 break;
7990 c = old;
7991 }
7992- return c != (u);
7993+ return c != u;
7994 }
7995
7996 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7997diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
7998index 1775d6e..b65017f 100644
7999--- a/arch/x86/include/asm/bitops.h
8000+++ b/arch/x86/include/asm/bitops.h
8001@@ -38,7 +38,7 @@
8002 * a mask operation on a byte.
8003 */
8004 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8005-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8006+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8007 #define CONST_MASK(nr) (1 << ((nr) & 7))
8008
8009 /**
8010diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8011index 5e1a2ee..c9f9533 100644
8012--- a/arch/x86/include/asm/boot.h
8013+++ b/arch/x86/include/asm/boot.h
8014@@ -11,10 +11,15 @@
8015 #include <asm/pgtable_types.h>
8016
8017 /* Physical address where kernel should be loaded. */
8018-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8019+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8020 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8021 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8022
8023+#ifndef __ASSEMBLY__
8024+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8025+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8026+#endif
8027+
8028 /* Minimum kernel alignment, as a power of two */
8029 #ifdef CONFIG_X86_64
8030 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8031diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8032index 48f99f1..d78ebf9 100644
8033--- a/arch/x86/include/asm/cache.h
8034+++ b/arch/x86/include/asm/cache.h
8035@@ -5,12 +5,13 @@
8036
8037 /* L1 cache line size */
8038 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8039-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8040+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8041
8042 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8043+#define __read_only __attribute__((__section__(".data..read_only")))
8044
8045 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8046-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8047+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8048
8049 #ifdef CONFIG_X86_VSMP
8050 #ifdef CONFIG_SMP
8051diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8052index 4e12668..501d239 100644
8053--- a/arch/x86/include/asm/cacheflush.h
8054+++ b/arch/x86/include/asm/cacheflush.h
8055@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8056 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8057
8058 if (pg_flags == _PGMT_DEFAULT)
8059- return -1;
8060+ return ~0UL;
8061 else if (pg_flags == _PGMT_WC)
8062 return _PAGE_CACHE_WC;
8063 else if (pg_flags == _PGMT_UC_MINUS)
8064diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8065index 46fc474..b02b0f9 100644
8066--- a/arch/x86/include/asm/checksum_32.h
8067+++ b/arch/x86/include/asm/checksum_32.h
8068@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8069 int len, __wsum sum,
8070 int *src_err_ptr, int *dst_err_ptr);
8071
8072+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8073+ int len, __wsum sum,
8074+ int *src_err_ptr, int *dst_err_ptr);
8075+
8076+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8077+ int len, __wsum sum,
8078+ int *src_err_ptr, int *dst_err_ptr);
8079+
8080 /*
8081 * Note: when you get a NULL pointer exception here this means someone
8082 * passed in an incorrect kernel address to one of these functions.
8083@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8084 int *err_ptr)
8085 {
8086 might_sleep();
8087- return csum_partial_copy_generic((__force void *)src, dst,
8088+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8089 len, sum, err_ptr, NULL);
8090 }
8091
8092@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8093 {
8094 might_sleep();
8095 if (access_ok(VERIFY_WRITE, dst, len))
8096- return csum_partial_copy_generic(src, (__force void *)dst,
8097+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8098 len, sum, NULL, err_ptr);
8099
8100 if (len)
8101diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8102index 88b23a4..d2e5f9f 100644
8103--- a/arch/x86/include/asm/cpufeature.h
8104+++ b/arch/x86/include/asm/cpufeature.h
8105@@ -358,7 +358,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8106 ".section .discard,\"aw\",@progbits\n"
8107 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8108 ".previous\n"
8109- ".section .altinstr_replacement,\"ax\"\n"
8110+ ".section .altinstr_replacement,\"a\"\n"
8111 "3: movb $1,%0\n"
8112 "4:\n"
8113 ".previous\n"
8114diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8115index 41935fa..3b40db8 100644
8116--- a/arch/x86/include/asm/desc.h
8117+++ b/arch/x86/include/asm/desc.h
8118@@ -4,6 +4,7 @@
8119 #include <asm/desc_defs.h>
8120 #include <asm/ldt.h>
8121 #include <asm/mmu.h>
8122+#include <asm/pgtable.h>
15a11c5b 8123
ae4e228f
MT
8124 #include <linux/smp.h>
8125
fe2de317 8126@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15a11c5b
MT
8127
8128 desc->type = (info->read_exec_only ^ 1) << 1;
8129 desc->type |= info->contents << 2;
8130+ desc->type |= info->seg_not_present ^ 1;
8131
8132 desc->s = 1;
8133 desc->dpl = 0x3;
fe2de317 8134@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
58c5fc13
MT
8135 }
8136
8137 extern struct desc_ptr idt_descr;
8138-extern gate_desc idt_table[];
8139-
8140-struct gdt_page {
8141- struct desc_struct gdt[GDT_ENTRIES];
8142-} __attribute__((aligned(PAGE_SIZE)));
15a11c5b 8143-
58c5fc13
MT
8144-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8145+extern gate_desc idt_table[256];
8146
8147+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8148 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8149 {
8150- return per_cpu(gdt_page, cpu).gdt;
8151+ return cpu_gdt_table[cpu];
8152 }
8153
8154 #ifdef CONFIG_X86_64
fe2de317 8155@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
317566c1
MT
8156 unsigned long base, unsigned dpl, unsigned flags,
8157 unsigned short seg)
8158 {
8159- gate->a = (seg << 16) | (base & 0xffff);
15a11c5b
MT
8160- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8161+ gate->gate.offset_low = base;
8162+ gate->gate.seg = seg;
8163+ gate->gate.reserved = 0;
8164+ gate->gate.type = type;
8165+ gate->gate.s = 0;
8166+ gate->gate.dpl = dpl;
8167+ gate->gate.p = 1;
8168+ gate->gate.offset_high = base >> 16;
317566c1
MT
8169 }
8170
8171 #endif
fe2de317 8172@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
15a11c5b
MT
8173
8174 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
58c5fc13 8175 {
ae4e228f 8176+ pax_open_kernel();
58c5fc13 8177 memcpy(&idt[entry], gate, sizeof(*gate));
ae4e228f 8178+ pax_close_kernel();
58c5fc13
MT
8179 }
8180
15a11c5b 8181 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
58c5fc13 8182 {
ae4e228f 8183+ pax_open_kernel();
58c5fc13 8184 memcpy(&ldt[entry], desc, 8);
ae4e228f 8185+ pax_close_kernel();
58c5fc13
MT
8186 }
8187
15a11c5b 8188 static inline void
fe2de317 8189@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
15a11c5b 8190 default: size = sizeof(*gdt); break;
58c5fc13 8191 }
15a11c5b 8192
ae4e228f 8193+ pax_open_kernel();
58c5fc13 8194 memcpy(&gdt[entry], desc, size);
ae4e228f 8195+ pax_close_kernel();
58c5fc13
MT
8196 }
8197
8198 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
fe2de317 8199@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
58c5fc13
MT
8200
8201 static inline void native_load_tr_desc(void)
8202 {
ae4e228f 8203+ pax_open_kernel();
58c5fc13 8204 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
ae4e228f 8205+ pax_close_kernel();
58c5fc13
MT
8206 }
8207
8208 static inline void native_load_gdt(const struct desc_ptr *dtr)
fe2de317 8209@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
58c5fc13 8210 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
15a11c5b 8211 unsigned int i;
58c5fc13 8212
ae4e228f 8213+ pax_open_kernel();
58c5fc13
MT
8214 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8215 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
ae4e228f 8216+ pax_close_kernel();
58c5fc13
MT
8217 }
8218
8219 #define _LDT_empty(info) \
fe2de317 8220@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
df50ba0c
MT
8221 desc->limit = (limit >> 16) & 0xf;
8222 }
8223
8224-static inline void _set_gate(int gate, unsigned type, void *addr,
8225+static inline void _set_gate(int gate, unsigned type, const void *addr,
8226 unsigned dpl, unsigned ist, unsigned seg)
8227 {
8228 gate_desc s;
fe2de317 8229@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
df50ba0c
MT
8230 * Pentium F0 0F bugfix can have resulted in the mapped
8231 * IDT being write-protected.
8232 */
8233-static inline void set_intr_gate(unsigned int n, void *addr)
8234+static inline void set_intr_gate(unsigned int n, const void *addr)
8235 {
8236 BUG_ON((unsigned)n > 0xFF);
8237 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
fe2de317 8238@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
df50ba0c
MT
8239 /*
8240 * This routine sets up an interrupt gate at directory privilege level 3.
8241 */
8242-static inline void set_system_intr_gate(unsigned int n, void *addr)
8243+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8244 {
8245 BUG_ON((unsigned)n > 0xFF);
8246 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8247 }
8248
8249-static inline void set_system_trap_gate(unsigned int n, void *addr)
8250+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8251 {
8252 BUG_ON((unsigned)n > 0xFF);
8253 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8254 }
8255
8256-static inline void set_trap_gate(unsigned int n, void *addr)
8257+static inline void set_trap_gate(unsigned int n, const void *addr)
8258 {
8259 BUG_ON((unsigned)n > 0xFF);
8260 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
fe2de317 8261@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
df50ba0c
MT
8262 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8263 {
8264 BUG_ON((unsigned)n > 0xFF);
8265- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8266+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8267 }
8268
8269-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8270+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8271 {
8272 BUG_ON((unsigned)n > 0xFF);
8273 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8274 }
8275
8276-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8277+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8278 {
8279 BUG_ON((unsigned)n > 0xFF);
58c5fc13
MT
8280 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8281 }
8282
8283+#ifdef CONFIG_X86_32
8284+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8285+{
8286+ struct desc_struct d;
8287+
8288+ if (likely(limit))
8289+ limit = (limit - 1UL) >> PAGE_SHIFT;
8290+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8291+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8292+}
8293+#endif
8294+
8295 #endif /* _ASM_X86_DESC_H */
fe2de317
MT
8296diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8297index 278441f..b95a174 100644
8298--- a/arch/x86/include/asm/desc_defs.h
8299+++ b/arch/x86/include/asm/desc_defs.h
8300@@ -31,6 +31,12 @@ struct desc_struct {
8301 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8302 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8303 };
8304+ struct {
8305+ u16 offset_low;
8306+ u16 seg;
8307+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8308+ unsigned offset_high: 16;
8309+ } gate;
8310 };
8311 } __attribute__((packed));
8312
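The new "gate" bit-field view in desc_defs.h is what lets the patched pack_gate() above assign named fields instead of hand-packing two 32-bit words. The stand-alone mock-up below (hypothetical union and values, little-endian GCC bit-field layout and C11 anonymous members assumed) fills both views and prints them so the equivalence with the removed shift-and-mask code can be checked.

#include <stdint.h>
#include <stdio.h>

/* Mock-up of the patched desc_struct union: the raw a/b words and the
 * "gate" bit-field view alias the same eight bytes. */
union gate_desc {
	struct { uint32_t a, b; };
	struct {
		uint16_t offset_low;
		uint16_t seg;
		unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
		unsigned offset_high: 16;
	} gate;
};

int main(void)
{
	unsigned long base = 0xc1234567UL, seg = 0x60, type = 0xe, dpl = 0;
	union gate_desc legacy = {0}, packed = {0};

	/* removed pack_gate(): shift-and-mask into the two raw words */
	legacy.a = (seg << 16) | (base & 0xffff);
	legacy.b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

	/* patched pack_gate(): named bit-fields, s = 0, reserved = 0, p = 1 */
	packed.gate.offset_low = base;
	packed.gate.seg = seg;
	packed.gate.type = type;
	packed.gate.dpl = dpl;
	packed.gate.p = 1;
	packed.gate.offset_high = base >> 16;

	printf("legacy %08x:%08x  bit-field %08x:%08x\n",
	       legacy.a, legacy.b, packed.a, packed.b);
	return 0;
}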
8313diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8314index 908b969..a1f4eb4 100644
8315--- a/arch/x86/include/asm/e820.h
8316+++ b/arch/x86/include/asm/e820.h
57199397 8317@@ -69,7 +69,7 @@ struct e820map {
ae4e228f 8318 #define ISA_START_ADDRESS 0xa0000
58c5fc13 8319 #define ISA_END_ADDRESS 0x100000
58c5fc13
MT
8320
8321-#define BIOS_BEGIN 0x000a0000
8322+#define BIOS_BEGIN 0x000c0000
8323 #define BIOS_END 0x00100000
8324
bc901d79 8325 #define BIOS_ROM_BASE 0xffe00000
fe2de317
MT
8326diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8327index f2ad216..eb24c96 100644
8328--- a/arch/x86/include/asm/elf.h
8329+++ b/arch/x86/include/asm/elf.h
ae4e228f 8330@@ -237,7 +237,25 @@ extern int force_personality32;
58c5fc13
MT
8331 the loader. We need to make sure that it is out of the way of the program
8332 that it will "exec", and that there is sufficient room for the brk. */
8333
8334+#ifdef CONFIG_PAX_SEGMEXEC
8335+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8336+#else
8337 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8338+#endif
8339+
8340+#ifdef CONFIG_PAX_ASLR
8341+#ifdef CONFIG_X86_32
8342+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8343+
8344+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8345+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8346+#else
8347+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8348+
df50ba0c
MT
8349+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8350+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
58c5fc13
MT
8351+#endif
8352+#endif
8353
8354 /* This yields a mask that user programs can use to figure out what
8355 instruction set this CPU supports. This could be done in user space,
15a11c5b
MT
8356@@ -290,9 +308,7 @@ do { \
8357
58c5fc13
MT
8358 #define ARCH_DLINFO \
8359 do { \
15a11c5b 8360- if (vdso_enabled) \
58c5fc13
MT
8361- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8362- (unsigned long)current->mm->context.vdso); \
15a11c5b 8363+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
58c5fc13
MT
8364 } while (0)
8365
8366 #define AT_SYSINFO 32
15a11c5b 8367@@ -303,7 +319,7 @@ do { \
58c5fc13
MT
8368
8369 #endif /* !CONFIG_X86_32 */
8370
8371-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8372+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8373
8374 #define VDSO_ENTRY \
8375 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
fe2de317 8376@@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
58c5fc13
MT
8377 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8378 #define compat_arch_setup_additional_pages syscall32_setup_pages
8379
8380-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8381-#define arch_randomize_brk arch_randomize_brk
8382-
8383 #endif /* _ASM_X86_ELF_H */
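The PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN values added above are entropy widths in pages: 16 bits on plain x86-32, 15 under SEGMEXEC (half the address space), and TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 for native 64-bit tasks, which works out to 32 with the usual 47-bit user space. The sketch below only illustrates what a width of that size buys; the mask-and-shift derivation and the rand() stand-in are illustrative assumptions rather than the patch's code, and the 1UL shifts assume a 64-bit long.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12

/* Illustrative only: an N-bit delta means N random bits' worth of
 * page-sized steps, i.e. (1 << N) << PAGE_SHIFT bytes of spread.
 * rand() is a stand-in for the kernel's entropy source. */
static unsigned long sample_delta(unsigned int bits)
{
	return ((unsigned long)rand() & ((1UL << bits) - 1)) << PAGE_SHIFT;
}

int main(void)
{
	unsigned int widths[] = { 15, 16, 32 };	/* SEGMEXEC, plain i386, x86-64 */
	unsigned int i;

	srand((unsigned)time(NULL));
	for (i = 0; i < 3; i++)
		printf("%2u bits -> %#14lx bytes of spread, sample delta %#lx\n",
		       widths[i], (1UL << widths[i]) << PAGE_SHIFT,
		       sample_delta(widths[i]));
	return 0;
}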
fe2de317
MT
8384diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8385index cc70c1c..d96d011 100644
8386--- a/arch/x86/include/asm/emergency-restart.h
8387+++ b/arch/x86/include/asm/emergency-restart.h
66a7e928
MT
8388@@ -15,6 +15,6 @@ enum reboot_type {
8389
8390 extern enum reboot_type reboot_type;
8391
8392-extern void machine_emergency_restart(void);
8393+extern void machine_emergency_restart(void) __noreturn;
8394
8395 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
fe2de317
MT
8396diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8397index d09bb03..4ea4194 100644
8398--- a/arch/x86/include/asm/futex.h
8399+++ b/arch/x86/include/asm/futex.h
bc901d79 8400@@ -12,16 +12,18 @@
58c5fc13
MT
8401 #include <asm/system.h>
8402
df50ba0c 8403 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
6e9df6a3 8404+ typecheck(u32 __user *, uaddr); \
58c5fc13
MT
8405 asm volatile("1:\t" insn "\n" \
8406 "2:\t.section .fixup,\"ax\"\n" \
df50ba0c
MT
8407 "3:\tmov\t%3, %1\n" \
8408 "\tjmp\t2b\n" \
8409 "\t.previous\n" \
8410 _ASM_EXTABLE(1b, 3b) \
8411- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
6e9df6a3 8412+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
df50ba0c
MT
8413 : "i" (-EFAULT), "0" (oparg), "1" (0))
8414
8415 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
6e9df6a3 8416+ typecheck(u32 __user *, uaddr); \
df50ba0c
MT
8417 asm volatile("1:\tmovl %2, %0\n" \
8418 "\tmovl\t%0, %3\n" \
8419 "\t" insn "\n" \
66a7e928 8420@@ -34,7 +36,7 @@
df50ba0c
MT
8421 _ASM_EXTABLE(1b, 4b) \
8422 _ASM_EXTABLE(2b, 4b) \
58c5fc13 8423 : "=&a" (oldval), "=&r" (ret), \
df50ba0c 8424- "+m" (*uaddr), "=&r" (tem) \
6e9df6a3 8425+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
58c5fc13 8426 : "r" (oparg), "i" (-EFAULT), "1" (0))
58c5fc13 8427
66a7e928 8428 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
fe2de317 8429@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
58c5fc13
MT
8430
8431 switch (op) {
8432 case FUTEX_OP_SET:
bc901d79 8433- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16454cff 8434+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
58c5fc13
MT
8435 break;
8436 case FUTEX_OP_ADD:
bc901d79 8437- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16454cff 8438+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
58c5fc13 8439 uaddr, oparg);
58c5fc13
MT
8440 break;
8441 case FUTEX_OP_OR:
fe2de317 8442@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
66a7e928 8443 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
58c5fc13
MT
8444 return -EFAULT;
8445
66a7e928
MT
8446- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8447+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
bc901d79 8448 "2:\t.section .fixup, \"ax\"\n"
66a7e928 8449 "3:\tmov %3, %0\n"
58c5fc13
MT
8450 "\tjmp 2b\n"
8451 "\t.previous\n"
8452 _ASM_EXTABLE(1b, 3b)
66a7e928 8453- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
6e9df6a3 8454+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
66a7e928 8455 : "i" (-EFAULT), "r" (newval), "1" (oldval)
58c5fc13
MT
8456 : "memory"
8457 );
fe2de317
MT
8458diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8459index 0919905..2cf38d6 100644
8460--- a/arch/x86/include/asm/hw_irq.h
8461+++ b/arch/x86/include/asm/hw_irq.h
6e9df6a3 8462@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8308f9c9
MT
8463 extern void enable_IO_APIC(void);
8464
8465 /* Statistics */
8466-extern atomic_t irq_err_count;
8467-extern atomic_t irq_mis_count;
8468+extern atomic_unchecked_t irq_err_count;
8469+extern atomic_unchecked_t irq_mis_count;
8470
8471 /* EISA */
8472 extern void eisa_set_level_irq(unsigned int irq);
fe2de317
MT
8473diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8474index c9e09ea..73888df 100644
8475--- a/arch/x86/include/asm/i387.h
8476+++ b/arch/x86/include/asm/i387.h
8477@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
df50ba0c
MT
8478 {
8479 int err;
8480
8481+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8482+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
6e9df6a3 8483+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
df50ba0c
MT
8484+#endif
8485+
bc901d79 8486 /* See comment in fxsave() below. */
16454cff
MT
8487 #ifdef CONFIG_AS_FXSAVEQ
8488 asm volatile("1: fxrstorq %[fx]\n\t"
fe2de317 8489@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
df50ba0c
MT
8490 {
8491 int err;
8492
8493+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8494+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8495+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8496+#endif
8497+
6892158b
MT
8498 /*
8499 * Clear the bytes not touched by the fxsave and reserved
8500 * for the SW usage.
fe2de317 8501@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
bc901d79 8502 #endif /* CONFIG_X86_64 */
58c5fc13
MT
8503
8504 /* We need a safe address that is cheap to find and that is already
8505- in L1 during context switch. The best choices are unfortunately
8506- different for UP and SMP */
8507-#ifdef CONFIG_SMP
8508-#define safe_address (__per_cpu_offset[0])
8509-#else
8510-#define safe_address (kstat_cpu(0).cpustat.user)
8511-#endif
8512+ in L1 during context switch. */
8513+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8514
8515 /*
8516 * These must be called with preempt disabled
fe2de317 8517@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
71d190be
MT
8518 struct thread_info *me = current_thread_info();
8519 preempt_disable();
8520 if (me->status & TS_USEDFPU)
8521- __save_init_fpu(me->task);
8522+ __save_init_fpu(current);
8523 else
8524 clts();
8525 }
fe2de317
MT
8526diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8527index d8e8eef..99f81ae 100644
8528--- a/arch/x86/include/asm/io.h
8529+++ b/arch/x86/include/asm/io.h
6e9df6a3 8530@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
58c5fc13
MT
8531
8532 #include <linux/vmalloc.h>
8533
8534+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
ae4e228f 8535+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
58c5fc13 8536+{
c52201e0 8537+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
58c5fc13
MT
8538+}
8539+
ae4e228f 8540+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
58c5fc13 8541+{
c52201e0 8542+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
58c5fc13
MT
8543+}
8544+
df50ba0c
MT
8545 /*
8546 * Convert a virtual cached pointer to an uncached pointer
8547 */
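The valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers added to io.h above simply bound /dev/mem-style accesses by the CPU's physical address width. Below is a stand-alone version with x86_phys_bits pinned to 36 instead of reading boot_cpu_data; the test addresses are made up and a 64-bit long is assumed.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-alone mirror of the added valid_phys_addr_range(), with the
 * CPU's physical address width fixed at 36 bits for the example. */
static unsigned int x86_phys_bits = 36;

static int valid_phys_addr_range(unsigned long addr, unsigned long count)
{
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
	printf("%d\n", valid_phys_addr_range(0x00100000UL, 4096));	/* 1: well below 2^36 */
	printf("%d\n", valid_phys_addr_range(0xfffffffffUL, 4096));	/* 0: runs past 2^36  */
	return 0;
}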
fe2de317
MT
8548diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8549index bba3cf8..06bc8da 100644
8550--- a/arch/x86/include/asm/irqflags.h
8551+++ b/arch/x86/include/asm/irqflags.h
8552@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
ae4e228f
MT
8553 sti; \
8554 sysexit
8555
df50ba0c
MT
8556+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8557+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8558+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8559+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
ae4e228f
MT
8560+
8561 #else
58c5fc13
MT
8562 #define INTERRUPT_RETURN iret
8563 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
fe2de317
MT
8564diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8565index 5478825..839e88c 100644
8566--- a/arch/x86/include/asm/kprobes.h
8567+++ b/arch/x86/include/asm/kprobes.h
71d190be
MT
8568@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8569 #define RELATIVEJUMP_SIZE 5
8570 #define RELATIVECALL_OPCODE 0xe8
8571 #define RELATIVE_ADDR_SIZE 4
8572-#define MAX_STACK_SIZE 64
8573-#define MIN_STACK_SIZE(ADDR) \
8574- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8575- THREAD_SIZE - (unsigned long)(ADDR))) \
8576- ? (MAX_STACK_SIZE) \
8577- : (((unsigned long)current_thread_info()) + \
8578- THREAD_SIZE - (unsigned long)(ADDR)))
8579+#define MAX_STACK_SIZE 64UL
8580+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8581
8582 #define flush_insn_slot(p) do { } while (0)
8583
fe2de317
MT
8584diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8585index dd51c83..66cbfac 100644
8586--- a/arch/x86/include/asm/kvm_host.h
8587+++ b/arch/x86/include/asm/kvm_host.h
6e9df6a3 8588@@ -456,7 +456,7 @@ struct kvm_arch {
8308f9c9
MT
8589 unsigned int n_requested_mmu_pages;
8590 unsigned int n_max_mmu_pages;
6e9df6a3 8591 unsigned int indirect_shadow_pages;
8308f9c9
MT
8592- atomic_t invlpg_counter;
8593+ atomic_unchecked_t invlpg_counter;
8594 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8595 /*
8596 * Hash table of struct kvm_mmu_page.
6e9df6a3 8597@@ -636,7 +636,7 @@ struct kvm_x86_ops {
15a11c5b
MT
8598 enum x86_intercept_stage stage);
8599
8600 const struct trace_print_flags *exit_reasons_str;
8601-};
8602+} __do_const;
8603
8604 struct kvm_arch_async_pf {
8605 u32 token;
fe2de317
MT
8606diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8607index 9cdae5d..300d20f 100644
8608--- a/arch/x86/include/asm/local.h
8609+++ b/arch/x86/include/asm/local.h
bc901d79 8610@@ -18,26 +18,58 @@ typedef struct {
58c5fc13
MT
8611
8612 static inline void local_inc(local_t *l)
8613 {
8614- asm volatile(_ASM_INC "%0"
8615+ asm volatile(_ASM_INC "%0\n"
8616+
8617+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 8618+ "jno 0f\n"
58c5fc13 8619+ _ASM_DEC "%0\n"
bc901d79
MT
8620+ "int $4\n0:\n"
8621+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
8622+#endif
8623+
8624 : "+m" (l->a.counter));
8625 }
8626
8627 static inline void local_dec(local_t *l)
8628 {
8629- asm volatile(_ASM_DEC "%0"
8630+ asm volatile(_ASM_DEC "%0\n"
8631+
8632+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 8633+ "jno 0f\n"
58c5fc13 8634+ _ASM_INC "%0\n"
bc901d79
MT
8635+ "int $4\n0:\n"
8636+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
8637+#endif
8638+
8639 : "+m" (l->a.counter));
8640 }
8641
8642 static inline void local_add(long i, local_t *l)
8643 {
8644- asm volatile(_ASM_ADD "%1,%0"
8645+ asm volatile(_ASM_ADD "%1,%0\n"
8646+
8647+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 8648+ "jno 0f\n"
58c5fc13 8649+ _ASM_SUB "%1,%0\n"
bc901d79
MT
8650+ "int $4\n0:\n"
8651+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
8652+#endif
8653+
8654 : "+m" (l->a.counter)
8655 : "ir" (i));
8656 }
8657
8658 static inline void local_sub(long i, local_t *l)
8659 {
8660- asm volatile(_ASM_SUB "%1,%0"
8661+ asm volatile(_ASM_SUB "%1,%0\n"
8662+
8663+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 8664+ "jno 0f\n"
58c5fc13 8665+ _ASM_ADD "%1,%0\n"
bc901d79
MT
8666+ "int $4\n0:\n"
8667+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
8668+#endif
8669+
8670 : "+m" (l->a.counter)
8671 : "ir" (i));
8672 }
fe2de317 8673@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
58c5fc13
MT
8674 {
8675 unsigned char c;
8676
8677- asm volatile(_ASM_SUB "%2,%0; sete %1"
8678+ asm volatile(_ASM_SUB "%2,%0\n"
8679+
8680+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 8681+ "jno 0f\n"
58c5fc13 8682+ _ASM_ADD "%2,%0\n"
bc901d79
MT
8683+ "int $4\n0:\n"
8684+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
8685+#endif
8686+
8687+ "sete %1\n"
8688 : "+m" (l->a.counter), "=qm" (c)
8689 : "ir" (i) : "memory");
8690 return c;
fe2de317 8691@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
58c5fc13
MT
8692 {
8693 unsigned char c;
8694
8695- asm volatile(_ASM_DEC "%0; sete %1"
8696+ asm volatile(_ASM_DEC "%0\n"
8697+
8698+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 8699+ "jno 0f\n"
58c5fc13 8700+ _ASM_INC "%0\n"
bc901d79
MT
8701+ "int $4\n0:\n"
8702+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
8703+#endif
8704+
8705+ "sete %1\n"
8706 : "+m" (l->a.counter), "=qm" (c)
8707 : : "memory");
8708 return c != 0;
fe2de317 8709@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
58c5fc13
MT
8710 {
8711 unsigned char c;
8712
8713- asm volatile(_ASM_INC "%0; sete %1"
8714+ asm volatile(_ASM_INC "%0\n"
8715+
8716+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 8717+ "jno 0f\n"
58c5fc13 8718+ _ASM_DEC "%0\n"
bc901d79
MT
8719+ "int $4\n0:\n"
8720+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
8721+#endif
8722+
8723+ "sete %1\n"
8724 : "+m" (l->a.counter), "=qm" (c)
8725 : : "memory");
8726 return c != 0;
fe2de317 8727@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
58c5fc13
MT
8728 {
8729 unsigned char c;
8730
8731- asm volatile(_ASM_ADD "%2,%0; sets %1"
8732+ asm volatile(_ASM_ADD "%2,%0\n"
8733+
8734+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 8735+ "jno 0f\n"
58c5fc13 8736+ _ASM_SUB "%2,%0\n"
bc901d79
MT
8737+ "int $4\n0:\n"
8738+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
8739+#endif
8740+
8741+ "sets %1\n"
8742 : "+m" (l->a.counter), "=qm" (c)
8743 : "ir" (i) : "memory");
8744 return c;
fe2de317 8745@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
58c5fc13
MT
8746 #endif
8747 /* Modern 486+ processor */
8748 __i = i;
8749- asm volatile(_ASM_XADD "%0, %1;"
8750+ asm volatile(_ASM_XADD "%0, %1\n"
8751+
8752+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 8753+ "jno 0f\n"
58c5fc13 8754+ _ASM_MOV "%0,%1\n"
bc901d79
MT
8755+ "int $4\n0:\n"
8756+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
8757+#endif
8758+
8759 : "+r" (i), "+m" (l->a.counter)
8760 : : "memory");
8761 return i + __i;
fe2de317
MT
8762diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8763index 593e51d..fa69c9a 100644
8764--- a/arch/x86/include/asm/mman.h
8765+++ b/arch/x86/include/asm/mman.h
ae4e228f
MT
8766@@ -5,4 +5,14 @@
8767
8768 #include <asm-generic/mman.h>
58c5fc13
MT
8769
8770+#ifdef __KERNEL__
8771+#ifndef __ASSEMBLY__
8772+#ifdef CONFIG_X86_32
8773+#define arch_mmap_check i386_mmap_check
8774+int i386_mmap_check(unsigned long addr, unsigned long len,
8775+ unsigned long flags);
8776+#endif
8777+#endif
8778+#endif
8779+
8780 #endif /* _ASM_X86_MMAN_H */
fe2de317
MT
8781diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8782index 5f55e69..e20bfb1 100644
8783--- a/arch/x86/include/asm/mmu.h
8784+++ b/arch/x86/include/asm/mmu.h
8785@@ -9,7 +9,7 @@
8786 * we put the segment information here.
8787 */
8788 typedef struct {
8789- void *ldt;
8790+ struct desc_struct *ldt;
8791 int size;
8792
8793 #ifdef CONFIG_X86_64
8794@@ -18,7 +18,19 @@ typedef struct {
8795 #endif
8796
8797 struct mutex lock;
8798- void *vdso;
8799+ unsigned long vdso;
8800+
8801+#ifdef CONFIG_X86_32
8802+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8803+ unsigned long user_cs_base;
8804+ unsigned long user_cs_limit;
8805+
8806+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8807+ cpumask_t cpu_user_cs_mask;
8808+#endif
8809+
8810+#endif
8811+#endif
8812 } mm_context_t;
8813
8814 #ifdef CONFIG_SMP
8815diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8816index 6902152..399f3a2 100644
8817--- a/arch/x86/include/asm/mmu_context.h
8818+++ b/arch/x86/include/asm/mmu_context.h
8819@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
df50ba0c
MT
8820
8821 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8822 {
8823+
8824+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8825+ unsigned int i;
8826+ pgd_t *pgd;
8827+
8828+ pax_open_kernel();
8829+ pgd = get_cpu_pgd(smp_processor_id());
8830+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
15a11c5b 8831+ set_pgd_batched(pgd+i, native_make_pgd(0));
df50ba0c
MT
8832+ pax_close_kernel();
8833+#endif
8834+
8835 #ifdef CONFIG_SMP
8836 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8837 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
fe2de317 8838@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
58c5fc13
MT
8839 struct task_struct *tsk)
8840 {
8841 unsigned cpu = smp_processor_id();
15a11c5b 8842+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
58c5fc13
MT
8843+ int tlbstate = TLBSTATE_OK;
8844+#endif
8845
8846 if (likely(prev != next)) {
58c5fc13 8847 #ifdef CONFIG_SMP
15a11c5b 8848+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
58c5fc13
MT
8849+ tlbstate = percpu_read(cpu_tlbstate.state);
8850+#endif
8851 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8852 percpu_write(cpu_tlbstate.active_mm, next);
8853 #endif
df50ba0c
MT
8854 cpumask_set_cpu(cpu, mm_cpumask(next));
8855
8856 /* Re-load page tables */
8857+#ifdef CONFIG_PAX_PER_CPU_PGD
8858+ pax_open_kernel();
8859+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8860+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8861+ pax_close_kernel();
8862+ load_cr3(get_cpu_pgd(cpu));
8863+#else
8864 load_cr3(next->pgd);
8865+#endif
ea610fa8 8866
c52201e0
MT
8867 /* stop flush ipis for the previous mm */
8868 cpumask_clear_cpu(cpu, mm_cpumask(prev));
fe2de317 8869@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
58c5fc13
MT
8870 */
8871 if (unlikely(prev->context.ldt != next->context.ldt))
8872 load_LDT_nolock(&next->context);
df50ba0c 8873- }
58c5fc13
MT
8874+
8875+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
ae4e228f 8876+ if (!(__supported_pte_mask & _PAGE_NX)) {
58c5fc13
MT
8877+ smp_mb__before_clear_bit();
8878+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8879+ smp_mb__after_clear_bit();
8880+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8881+ }
8882+#endif
8883+
8884+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8885+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
ae4e228f
MT
8886+ prev->context.user_cs_limit != next->context.user_cs_limit))
8887+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
df50ba0c 8888 #ifdef CONFIG_SMP
ae4e228f 8889+ else if (unlikely(tlbstate != TLBSTATE_OK))
58c5fc13
MT
8890+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8891+#endif
ae4e228f 8892+#endif
58c5fc13 8893+
df50ba0c 8894+ }
58c5fc13 8895 else {
df50ba0c
MT
8896+
8897+#ifdef CONFIG_PAX_PER_CPU_PGD
8898+ pax_open_kernel();
8899+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8900+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8901+ pax_close_kernel();
8902+ load_cr3(get_cpu_pgd(cpu));
8903+#endif
8904+
8905+#ifdef CONFIG_SMP
8906 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8907 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8908
fe2de317 8909@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
df50ba0c
MT
8910 * tlb flush IPI delivery. We must reload CR3
8911 * to make sure to use no freed page tables.
58c5fc13 8912 */
df50ba0c
MT
8913+
8914+#ifndef CONFIG_PAX_PER_CPU_PGD
58c5fc13 8915 load_cr3(next->pgd);
df50ba0c
MT
8916+#endif
8917+
58c5fc13
MT
8918 load_LDT_nolock(&next->context);
8919+
8920+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
ae4e228f 8921+ if (!(__supported_pte_mask & _PAGE_NX))
58c5fc13
MT
8922+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8923+#endif
8924+
8925+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8926+#ifdef CONFIG_PAX_PAGEEXEC
ae4e228f 8927+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
58c5fc13
MT
8928+#endif
8929+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8930+#endif
8931+
8932 }
fe2de317
MT
8933+#endif
8934 }
8935-#endif
df50ba0c
MT
8936 }
8937
8938 #define activate_mm(prev, next) \
fe2de317
MT
8939diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
8940index 9eae775..c914fea 100644
8941--- a/arch/x86/include/asm/module.h
8942+++ b/arch/x86/include/asm/module.h
71d190be
MT
8943@@ -5,6 +5,7 @@
8944
8945 #ifdef CONFIG_X86_64
8946 /* X86_64 does not define MODULE_PROC_FAMILY */
8947+#define MODULE_PROC_FAMILY ""
8948 #elif defined CONFIG_M386
8949 #define MODULE_PROC_FAMILY "386 "
8950 #elif defined CONFIG_M486
fe2de317 8951@@ -59,8 +60,20 @@
df50ba0c
MT
8952 #error unknown processor family
8953 #endif
8954
71d190be
MT
8955-#ifdef CONFIG_X86_32
8956-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
fe2de317
MT
8957+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8958+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
8959+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
8960+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
df50ba0c 8961+#else
71d190be 8962+#define MODULE_PAX_KERNEXEC ""
58c5fc13
MT
8963 #endif
8964
6e9df6a3
MT
8965+#ifdef CONFIG_PAX_MEMORY_UDEREF
8966+#define MODULE_PAX_UDEREF "UDEREF "
71d190be 8967+#else
6e9df6a3 8968+#define MODULE_PAX_UDEREF ""
71d190be
MT
8969+#endif
8970+
6e9df6a3 8971+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
71d190be 8972+
58c5fc13 8973 #endif /* _ASM_X86_MODULE_H */
fe2de317
MT
8974diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
8975index 7639dbf..e08a58c 100644
8976--- a/arch/x86/include/asm/page_64_types.h
8977+++ b/arch/x86/include/asm/page_64_types.h
bc901d79
MT
8978@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8979
8980 /* duplicated to the one in bootmem.h */
8981 extern unsigned long max_pfn;
8982-extern unsigned long phys_base;
8983+extern const unsigned long phys_base;
8984
8985 extern unsigned long __phys_addr(unsigned long);
8986 #define __phys_reloc_hide(x) (x)
fe2de317
MT
8987diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
8988index a7d2db9..edb023e 100644
8989--- a/arch/x86/include/asm/paravirt.h
8990+++ b/arch/x86/include/asm/paravirt.h
8991@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
15a11c5b
MT
8992 val);
8993 }
8994
8995+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8996+{
8997+ pgdval_t val = native_pgd_val(pgd);
8998+
8999+ if (sizeof(pgdval_t) > sizeof(long))
9000+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9001+ val, (u64)val >> 32);
9002+ else
9003+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9004+ val);
9005+}
9006+
9007 static inline void pgd_clear(pgd_t *pgdp)
9008 {
9009 set_pgd(pgdp, __pgd(0));
fe2de317 9010@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
ae4e228f
MT
9011 pv_mmu_ops.set_fixmap(idx, phys, flags);
9012 }
9013
9014+#ifdef CONFIG_PAX_KERNEXEC
9015+static inline unsigned long pax_open_kernel(void)
9016+{
efbe55a5 9017+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
ae4e228f
MT
9018+}
9019+
9020+static inline unsigned long pax_close_kernel(void)
9021+{
efbe55a5 9022+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
ae4e228f
MT
9023+}
9024+#else
9025+static inline unsigned long pax_open_kernel(void) { return 0; }
9026+static inline unsigned long pax_close_kernel(void) { return 0; }
9027+#endif
9028+
9029 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9030
9031 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
6e9df6a3 9032@@ -964,7 +991,7 @@ extern void default_banner(void);
58c5fc13
MT
9033
9034 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9035 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9036-#define PARA_INDIRECT(addr) *%cs:addr
9037+#define PARA_INDIRECT(addr) *%ss:addr
9038 #endif
9039
9040 #define INTERRUPT_RETURN \
6e9df6a3 9041@@ -1041,6 +1068,21 @@ extern void default_banner(void);
df50ba0c 9042 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
ae4e228f
MT
9043 CLBR_NONE, \
9044 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
58c5fc13 9045+
df50ba0c 9046+#define GET_CR0_INTO_RDI \
ae4e228f 9047+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
df50ba0c 9048+ mov %rax,%rdi
ae4e228f 9049+
df50ba0c
MT
9050+#define SET_RDI_INTO_CR0 \
9051+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
ae4e228f 9052+
df50ba0c
MT
9053+#define GET_CR3_INTO_RDI \
9054+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9055+ mov %rax,%rdi
9056+
9057+#define SET_RDI_INTO_CR3 \
9058+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
ae4e228f
MT
9059+
9060 #endif /* CONFIG_X86_32 */
9061
9062 #endif /* __ASSEMBLY__ */
fe2de317
MT
9063diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9064index 8e8b9a4..f07d725 100644
9065--- a/arch/x86/include/asm/paravirt_types.h
9066+++ b/arch/x86/include/asm/paravirt_types.h
6e9df6a3 9067@@ -84,20 +84,20 @@ struct pv_init_ops {
15a11c5b
MT
9068 */
9069 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9070 unsigned long addr, unsigned len);
9071-};
9072+} __no_const;
9073
9074
9075 struct pv_lazy_ops {
9076 /* Set deferred update mode, used for batching operations. */
9077 void (*enter)(void);
9078 void (*leave)(void);
9079-};
9080+} __no_const;
9081
9082 struct pv_time_ops {
9083 unsigned long long (*sched_clock)(void);
6e9df6a3 9084 unsigned long long (*steal_clock)(int cpu);
15a11c5b
MT
9085 unsigned long (*get_tsc_khz)(void);
9086-};
9087+} __no_const;
9088
9089 struct pv_cpu_ops {
9090 /* hooks for various privileged instructions */
6e9df6a3 9091@@ -193,7 +193,7 @@ struct pv_cpu_ops {
15a11c5b
MT
9092
9093 void (*start_context_switch)(struct task_struct *prev);
9094 void (*end_context_switch)(struct task_struct *next);
9095-};
9096+} __no_const;
9097
9098 struct pv_irq_ops {
9099 /*
6e9df6a3 9100@@ -224,7 +224,7 @@ struct pv_apic_ops {
15a11c5b
MT
9101 unsigned long start_eip,
9102 unsigned long start_esp);
9103 #endif
9104-};
9105+} __no_const;
9106
9107 struct pv_mmu_ops {
9108 unsigned long (*read_cr2)(void);
6e9df6a3 9109@@ -313,6 +313,7 @@ struct pv_mmu_ops {
15a11c5b
MT
9110 struct paravirt_callee_save make_pud;
9111
9112 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9113+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9114 #endif /* PAGETABLE_LEVELS == 4 */
9115 #endif /* PAGETABLE_LEVELS >= 3 */
9116
6e9df6a3 9117@@ -324,6 +325,12 @@ struct pv_mmu_ops {
ae4e228f
MT
9118 an mfn. We can tell which is which from the index. */
9119 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9120 phys_addr_t phys, pgprot_t flags);
9121+
9122+#ifdef CONFIG_PAX_KERNEXEC
9123+ unsigned long (*pax_open_kernel)(void);
9124+ unsigned long (*pax_close_kernel)(void);
9125+#endif
9126+
9127 };
9128
9129 struct arch_spinlock;
6e9df6a3 9130@@ -334,7 +341,7 @@ struct pv_lock_ops {
15a11c5b
MT
9131 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9132 int (*spin_trylock)(struct arch_spinlock *lock);
9133 void (*spin_unlock)(struct arch_spinlock *lock);
9134-};
9135+} __no_const;
9136
9137 /* This contains all the paravirt structures: we get a convenient
9138 * number for each function using the offset which we use to indicate
fe2de317
MT
9139diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9140index b4389a4..b7ff22c 100644
9141--- a/arch/x86/include/asm/pgalloc.h
9142+++ b/arch/x86/include/asm/pgalloc.h
9143@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
58c5fc13
MT
9144 pmd_t *pmd, pte_t *pte)
9145 {
9146 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9147+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9148+}
9149+
9150+static inline void pmd_populate_user(struct mm_struct *mm,
9151+ pmd_t *pmd, pte_t *pte)
9152+{
9153+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9154 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9155 }
9156
fe2de317
MT
9157diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9158index 98391db..8f6984e 100644
9159--- a/arch/x86/include/asm/pgtable-2level.h
9160+++ b/arch/x86/include/asm/pgtable-2level.h
9161@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
58c5fc13
MT
9162
9163 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9164 {
ae4e228f 9165+ pax_open_kernel();
58c5fc13 9166 *pmdp = pmd;
ae4e228f 9167+ pax_close_kernel();
58c5fc13
MT
9168 }
9169
9170 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
fe2de317
MT
9171diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9172index effff47..f9e4035 100644
9173--- a/arch/x86/include/asm/pgtable-3level.h
9174+++ b/arch/x86/include/asm/pgtable-3level.h
9175@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
58c5fc13
MT
9176
9177 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9178 {
ae4e228f 9179+ pax_open_kernel();
58c5fc13 9180 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
ae4e228f 9181+ pax_close_kernel();
58c5fc13
MT
9182 }
9183
9184 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9185 {
ae4e228f 9186+ pax_open_kernel();
58c5fc13 9187 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
ae4e228f 9188+ pax_close_kernel();
58c5fc13
MT
9189 }
9190
9191 /*
fe2de317
MT
9192diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9193index 18601c8..3d716d1 100644
9194--- a/arch/x86/include/asm/pgtable.h
9195+++ b/arch/x86/include/asm/pgtable.h
9196@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
15a11c5b
MT
9197
9198 #ifndef __PAGETABLE_PUD_FOLDED
9199 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9200+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9201 #define pgd_clear(pgd) native_pgd_clear(pgd)
9202 #endif
9203
fe2de317 9204@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
ae4e228f
MT
9205
9206 #define arch_end_context_switch(prev) do {} while(0)
9207
9208+#define pax_open_kernel() native_pax_open_kernel()
9209+#define pax_close_kernel() native_pax_close_kernel()
9210 #endif /* CONFIG_PARAVIRT */
9211
9212+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9213+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
58c5fc13
MT
9214+
9215+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f
MT
9216+static inline unsigned long native_pax_open_kernel(void)
9217+{
58c5fc13
MT
9218+ unsigned long cr0;
9219+
ae4e228f
MT
9220+ preempt_disable();
9221+ barrier();
9222+ cr0 = read_cr0() ^ X86_CR0_WP;
9223+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9224+ write_cr0(cr0);
9225+ return cr0 ^ X86_CR0_WP;
9226+}
58c5fc13 9227+
ae4e228f
MT
9228+static inline unsigned long native_pax_close_kernel(void)
9229+{
9230+ unsigned long cr0;
58c5fc13 9231+
ae4e228f
MT
9232+ cr0 = read_cr0() ^ X86_CR0_WP;
9233+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9234+ write_cr0(cr0);
9235+ barrier();
9236+ preempt_enable_no_resched();
9237+ return cr0 ^ X86_CR0_WP;
9238+}
9239+#else
9240+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9241+static inline unsigned long native_pax_close_kernel(void) { return 0; }
58c5fc13
MT
9242+#endif
9243+
ae4e228f 9244 /*
58c5fc13
MT
9245 * The following only work if pte_present() is true.
9246 * Undefined behaviour if not..
9247 */
9248+static inline int pte_user(pte_t pte)
9249+{
9250+ return pte_val(pte) & _PAGE_USER;
9251+}
9252+
9253 static inline int pte_dirty(pte_t pte)
9254 {
9255 return pte_flags(pte) & _PAGE_DIRTY;
fe2de317 9256@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
58c5fc13
MT
9257 return pte_clear_flags(pte, _PAGE_RW);
9258 }
9259
9260+static inline pte_t pte_mkread(pte_t pte)
9261+{
9262+ return __pte(pte_val(pte) | _PAGE_USER);
9263+}
9264+
9265 static inline pte_t pte_mkexec(pte_t pte)
9266 {
9267- return pte_clear_flags(pte, _PAGE_NX);
9268+#ifdef CONFIG_X86_PAE
9269+ if (__supported_pte_mask & _PAGE_NX)
9270+ return pte_clear_flags(pte, _PAGE_NX);
9271+ else
9272+#endif
9273+ return pte_set_flags(pte, _PAGE_USER);
9274+}
9275+
9276+static inline pte_t pte_exprotect(pte_t pte)
9277+{
9278+#ifdef CONFIG_X86_PAE
9279+ if (__supported_pte_mask & _PAGE_NX)
9280+ return pte_set_flags(pte, _PAGE_NX);
9281+ else
9282+#endif
9283+ return pte_clear_flags(pte, _PAGE_USER);
9284 }
9285
9286 static inline pte_t pte_mkdirty(pte_t pte)
fe2de317 9287@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
df50ba0c
MT
9288 #endif
9289
9290 #ifndef __ASSEMBLY__
9291+
9292+#ifdef CONFIG_PAX_PER_CPU_PGD
9293+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9294+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9295+{
9296+ return cpu_pgd[cpu];
9297+}
9298+#endif
9299+
9300 #include <linux/mm_types.h>
9301
9302 static inline int pte_none(pte_t pte)
fe2de317 9303@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
58c5fc13
MT
9304
9305 static inline int pgd_bad(pgd_t pgd)
9306 {
9307- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9308+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9309 }
9310
9311 static inline int pgd_none(pgd_t pgd)
15a11c5b 9312@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
df50ba0c
MT
9313 * pgd_offset() returns a (pgd_t *)
9314 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9315 */
9316-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9317+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9318+
9319+#ifdef CONFIG_PAX_PER_CPU_PGD
9320+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9321+#endif
9322+
9323 /*
9324 * a shortcut which implies the use of the kernel's pgd, instead
9325 * of a process's
15a11c5b 9326@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
df50ba0c
MT
9327 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9328 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9329
9330+#ifdef CONFIG_X86_32
9331+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9332+#else
9333+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9334+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9335+
9336+#ifdef CONFIG_PAX_MEMORY_UDEREF
9337+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9338+#else
9339+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9340+#endif
9341+
9342+#endif
9343+
9344 #ifndef __ASSEMBLY__
9345
9346 extern int direct_gbpages;
fe2de317 9347@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
ae4e228f
MT
9348 * dst and src can be on the same page, but the range must not overlap,
9349 * and must not cross a page boundary.
58c5fc13 9350 */
ae4e228f
MT
9351-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9352+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
58c5fc13
MT
9353 {
9354- memcpy(dst, src, count * sizeof(pgd_t));
ae4e228f
MT
9355+ pax_open_kernel();
9356+ while (count--)
9357+ *dst++ = *src++;
9358+ pax_close_kernel();
58c5fc13
MT
9359 }
9360
df50ba0c
MT
9361+#ifdef CONFIG_PAX_PER_CPU_PGD
9362+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9363+#endif
9364+
9365+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9366+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9367+#else
9368+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9369+#endif
58c5fc13 9370
df50ba0c
MT
9371 #include <asm-generic/pgtable.h>
9372 #endif /* __ASSEMBLY__ */
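The native_pax_open_kernel()/native_pax_close_kernel() pair added to pgtable.h above toggles CR0.WP with an XOR and uses BUG_ON to catch unbalanced or nested calls. The userspace model below keeps that XOR/assert logic but drops the preempt_disable()/barrier() pairing and substitutes a plain variable for the real, privileged CR0 register; the 0x80050033 starting value is just a typical one.

#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

/* cr0 is only a variable here; the kernel helpers read and write the
 * actual control register. The open/close invariants are the same. */
static unsigned long cr0 = 0x80050033UL;	/* typical boot value, WP set */

static unsigned long open_kernel(void)
{
	unsigned long v = cr0 ^ X86_CR0_WP;

	assert(!(v & X86_CR0_WP));	/* WP must have been set: no nesting */
	cr0 = v;			/* WP now clear, read-only kernel data writable */
	return v ^ X86_CR0_WP;
}

static unsigned long close_kernel(void)
{
	unsigned long v = cr0 ^ X86_CR0_WP;

	assert(v & X86_CR0_WP);		/* WP must have been clear: balanced close */
	cr0 = v;			/* WP set again */
	return v ^ X86_CR0_WP;
}

int main(void)
{
	open_kernel();
	printf("WP %s after open\n", (cr0 & X86_CR0_WP) ? "set" : "clear");
	close_kernel();
	printf("WP %s after close\n", (cr0 & X86_CR0_WP) ? "set" : "clear");
	return 0;
}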
fe2de317
MT
9373diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9374index 0c92113..34a77c6 100644
9375--- a/arch/x86/include/asm/pgtable_32.h
9376+++ b/arch/x86/include/asm/pgtable_32.h
9377@@ -25,9 +25,6 @@
9378 struct mm_struct;
9379 struct vm_area_struct;
9380
9381-extern pgd_t swapper_pg_dir[1024];
9382-extern pgd_t initial_page_table[1024];
9383-
9384 static inline void pgtable_cache_init(void) { }
9385 static inline void check_pgt_cache(void) { }
9386 void paging_init(void);
9387@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9388 # include <asm/pgtable-2level.h>
9389 #endif
9390
9391+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9392+extern pgd_t initial_page_table[PTRS_PER_PGD];
9393+#ifdef CONFIG_X86_PAE
9394+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9395+#endif
9396+
9397 #if defined(CONFIG_HIGHPTE)
9398 #define pte_offset_map(dir, address) \
9399 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9400@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9401 /* Clear a kernel PTE and flush it from the TLB */
9402 #define kpte_clear_flush(ptep, vaddr) \
9403 do { \
9404+ pax_open_kernel(); \
9405 pte_clear(&init_mm, (vaddr), (ptep)); \
9406+ pax_close_kernel(); \
9407 __flush_tlb_one((vaddr)); \
9408 } while (0)
9409
9410@@ -74,6 +79,9 @@ do { \
9411
9412 #endif /* !__ASSEMBLY__ */
9413
9414+#define HAVE_ARCH_UNMAPPED_AREA
9415+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9416+
9417 /*
9418 * kern_addr_valid() is (1) for FLATMEM and (0) for
9419 * SPARSEMEM and DISCONTIGMEM
9420diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9421index ed5903b..c7fe163 100644
9422--- a/arch/x86/include/asm/pgtable_32_types.h
9423+++ b/arch/x86/include/asm/pgtable_32_types.h
9424@@ -8,7 +8,7 @@
9425 */
9426 #ifdef CONFIG_X86_PAE
9427 # include <asm/pgtable-3level_types.h>
9428-# define PMD_SIZE (1UL << PMD_SHIFT)
9429+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9430 # define PMD_MASK (~(PMD_SIZE - 1))
9431 #else
9432 # include <asm/pgtable-2level_types.h>
9433@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9434 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9435 #endif
9436
9437+#ifdef CONFIG_PAX_KERNEXEC
9438+#ifndef __ASSEMBLY__
9439+extern unsigned char MODULES_EXEC_VADDR[];
9440+extern unsigned char MODULES_EXEC_END[];
9441+#endif
9442+#include <asm/boot.h>
9443+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9444+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9445+#else
9446+#define ktla_ktva(addr) (addr)
9447+#define ktva_ktla(addr) (addr)
9448+#endif
9449+
9450 #define MODULES_VADDR VMALLOC_START
9451 #define MODULES_END VMALLOC_END
9452 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
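Under 32-bit KERNEXEC the ktla_ktva()/ktva_ktla() macros defined above are a fixed-offset shift between the two mappings of kernel text. The round-trip below hard-codes LOAD_PHYSICAL_ADDR = 0x1000000 and PAGE_OFFSET = 0xc0000000 (typical defaults; both are Kconfig-dependent in a real build) and uses a made-up address, purely to show the arithmetic.

#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL	/* assumed default */
#define PAGE_OFFSET        0xc0000000UL	/* assumed default */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long ktla = 0x00400000UL;	/* made-up kernel text address */
	unsigned long ktva = ktla_ktva(ktla);

	printf("ktla %#lx -> ktva %#lx -> back %#lx\n",
	       ktla, ktva, ktva_ktla(ktva));
	return 0;
}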
9453diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9454index 975f709..107976d 100644
9455--- a/arch/x86/include/asm/pgtable_64.h
9456+++ b/arch/x86/include/asm/pgtable_64.h
9457@@ -16,10 +16,14 @@
9458
9459 extern pud_t level3_kernel_pgt[512];
9460 extern pud_t level3_ident_pgt[512];
9461+extern pud_t level3_vmalloc_start_pgt[512];
9462+extern pud_t level3_vmalloc_end_pgt[512];
9463+extern pud_t level3_vmemmap_pgt[512];
9464+extern pud_t level2_vmemmap_pgt[512];
9465 extern pmd_t level2_kernel_pgt[512];
9466 extern pmd_t level2_fixmap_pgt[512];
9467-extern pmd_t level2_ident_pgt[512];
9468-extern pgd_t init_level4_pgt[];
9469+extern pmd_t level2_ident_pgt[512*2];
9470+extern pgd_t init_level4_pgt[512];
9471
9472 #define swapper_pg_dir init_level4_pgt
9473
9474@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9475
9476 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9477 {
9478+ pax_open_kernel();
9479 *pmdp = pmd;
9480+ pax_close_kernel();
9481 }
9482
9483 static inline void native_pmd_clear(pmd_t *pmd)
9484@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9485
9486 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9487 {
9488+ pax_open_kernel();
9489+ *pgdp = pgd;
9490+ pax_close_kernel();
9491+}
9492+
9493+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9494+{
9495 *pgdp = pgd;
9496 }
9497
9498diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9499index 766ea16..5b96cb3 100644
9500--- a/arch/x86/include/asm/pgtable_64_types.h
9501+++ b/arch/x86/include/asm/pgtable_64_types.h
9502@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9503 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9504 #define MODULES_END _AC(0xffffffffff000000, UL)
9505 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9506+#define MODULES_EXEC_VADDR MODULES_VADDR
9507+#define MODULES_EXEC_END MODULES_END
9508+
9509+#define ktla_ktva(addr) (addr)
9510+#define ktva_ktla(addr) (addr)
9511
9512 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9513diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9514index 013286a..8b42f4f 100644
9515--- a/arch/x86/include/asm/pgtable_types.h
9516+++ b/arch/x86/include/asm/pgtable_types.h
16454cff 9517@@ -16,13 +16,12 @@
58c5fc13
MT
9518 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9519 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9520 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9521-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9522+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9523 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9524 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9525 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9526-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9527-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
16454cff 9528-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
58c5fc13 9529+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
16454cff 9530+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
58c5fc13
MT
9531 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9532
9533 /* If _PAGE_BIT_PRESENT is clear, we use these: */
16454cff 9534@@ -40,7 +39,6 @@
58c5fc13
MT
9535 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9536 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9537 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9538-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9539 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9540 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9541 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
16454cff 9542@@ -57,8 +55,10 @@
58c5fc13
MT
9543
9544 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9545 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9546-#else
9547+#elif defined(CONFIG_KMEMCHECK)
9548 #define _PAGE_NX (_AT(pteval_t, 0))
9549+#else
9550+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9551 #endif
9552
9553 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
16454cff 9554@@ -96,6 +96,9 @@
58c5fc13
MT
9555 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9556 _PAGE_ACCESSED)
9557
9558+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9559+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9560+
9561 #define __PAGE_KERNEL_EXEC \
9562 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9563 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
6e9df6a3 9564@@ -106,7 +109,7 @@
58c5fc13
MT
9565 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9566 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9567 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9568-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
58c5fc13 9569+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
6e9df6a3
MT
9570 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9571 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
58c5fc13 9572 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
6e9df6a3 9573@@ -168,8 +171,8 @@
58c5fc13
MT
9574 * bits are combined, this will alow user to access the high address mapped
9575 * VDSO in the presence of CONFIG_COMPAT_VDSO
9576 */
9577-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9578-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9579+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9580+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9581 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9582 #endif
9583
fe2de317 9584@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
57199397
MT
9585 {
9586 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9587 }
9588+#endif
9589
9590+#if PAGETABLE_LEVELS == 3
9591+#include <asm-generic/pgtable-nopud.h>
9592+#endif
9593+
9594+#if PAGETABLE_LEVELS == 2
9595+#include <asm-generic/pgtable-nopmd.h>
9596+#endif
9597+
9598+#ifndef __ASSEMBLY__
9599 #if PAGETABLE_LEVELS > 3
9600 typedef struct { pudval_t pud; } pud_t;
9601
fe2de317 9602@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
57199397
MT
9603 return pud.pud;
9604 }
9605 #else
9606-#include <asm-generic/pgtable-nopud.h>
9607-
9608 static inline pudval_t native_pud_val(pud_t pud)
9609 {
9610 return native_pgd_val(pud.pgd);
fe2de317 9611@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
57199397
MT
9612 return pmd.pmd;
9613 }
9614 #else
9615-#include <asm-generic/pgtable-nopmd.h>
9616-
9617 static inline pmdval_t native_pmd_val(pmd_t pmd)
9618 {
9619 return native_pgd_val(pmd.pud.pgd);
6e9df6a3 9620@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
58c5fc13
MT
9621
9622 extern pteval_t __supported_pte_mask;
ae4e228f
MT
9623 extern void set_nx(void);
9624-extern int nx_enabled;
58c5fc13
MT
9625
9626 #define pgprot_writecombine pgprot_writecombine
9627 extern pgprot_t pgprot_writecombine(pgprot_t prot);
fe2de317
MT
9628diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9629index 0d1171c..36571a9 100644
9630--- a/arch/x86/include/asm/processor.h
9631+++ b/arch/x86/include/asm/processor.h
66a7e928 9632@@ -266,7 +266,7 @@ struct tss_struct {
58c5fc13
MT
9633
9634 } ____cacheline_aligned;
9635
9636-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9637+extern struct tss_struct init_tss[NR_CPUS];
9638
9639 /*
9640 * Save the original ist values for checking stack pointers during debugging
fe2de317 9641@@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(const void *x)
58c5fc13
MT
9642 */
9643 #define TASK_SIZE PAGE_OFFSET
9644 #define TASK_SIZE_MAX TASK_SIZE
9645+
9646+#ifdef CONFIG_PAX_SEGMEXEC
9647+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
58c5fc13
MT
9648+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9649+#else
9650 #define STACK_TOP TASK_SIZE
9651-#define STACK_TOP_MAX STACK_TOP
9652+#endif
ae4e228f 9653+
9654+#define STACK_TOP_MAX TASK_SIZE
9655
9656 #define INIT_THREAD { \
9657- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9658+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9659 .vm86_info = NULL, \
9660 .sysenter_cs = __KERNEL_CS, \
9661 .io_bitmap_ptr = NULL, \
fe2de317 9662@@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(const void *x)
9663 */
9664 #define INIT_TSS { \
9665 .x86_tss = { \
9666- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9667+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9668 .ss0 = __KERNEL_DS, \
9669 .ss1 = __KERNEL_CS, \
9670 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
fe2de317 9671@@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(const void *x)
9672 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9673
9674 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9675-#define KSTK_TOP(info) \
9676-({ \
9677- unsigned long *__ptr = (unsigned long *)(info); \
9678- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9679-})
71d190be 9680+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9681
9682 /*
9683 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
fe2de317 9684@@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9685 #define task_pt_regs(task) \
9686 ({ \
9687 struct pt_regs *__regs__; \
9688- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9689+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9690 __regs__ - 1; \
9691 })
9692
fe2de317 9693@@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9694 /*
9695 * User space process size. 47bits minus one guard page.
9696 */
9697-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9698+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9699
9700 /* This decides where the kernel will search for a free chunk of vm
9701 * space during mmap's.
9702 */
9703 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9704- 0xc0000000 : 0xFFFFe000)
9705+ 0xc0000000 : 0xFFFFf000)
9706
9707 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9708 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
fe2de317 9709@@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9710 #define STACK_TOP_MAX TASK_SIZE_MAX
9711
9712 #define INIT_THREAD { \
9713- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9714+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9715 }
9716
9717 #define INIT_TSS { \
9718- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9719+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9720 }
9721
9722 /*
fe2de317 9723@@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9724 */
9725 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9726
9727+#ifdef CONFIG_PAX_SEGMEXEC
9728+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9729+#endif
9730+
9731 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9732
9733 /* Get/set a process' ability to use the timestamp counter instruction */
9734diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9735index 3566454..4bdfb8c 100644
9736--- a/arch/x86/include/asm/ptrace.h
9737+++ b/arch/x86/include/asm/ptrace.h
9738@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9739 }
9740
9741 /*
9742- * user_mode_vm(regs) determines whether a register set came from user mode.
9743+ * user_mode(regs) determines whether a register set came from user mode.
9744 * This is true if V8086 mode was enabled OR if the register set was from
9745 * protected mode with RPL-3 CS value. This tricky test checks that with
9746 * one comparison. Many places in the kernel can bypass this full check
9747- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9748+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9749+ * be used.
9750 */
9751-static inline int user_mode(struct pt_regs *regs)
9752+static inline int user_mode_novm(struct pt_regs *regs)
9753 {
9754 #ifdef CONFIG_X86_32
9755 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9756 #else
9757- return !!(regs->cs & 3);
9758+ return !!(regs->cs & SEGMENT_RPL_MASK);
9759 #endif
9760 }
9761
9762-static inline int user_mode_vm(struct pt_regs *regs)
9763+static inline int user_mode(struct pt_regs *regs)
9764 {
9765 #ifdef CONFIG_X86_32
9766 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9767 USER_RPL;
9768 #else
9769- return user_mode(regs);
9770+ return user_mode_novm(regs);
9771 #endif
9772 }
9773
fe2de317 9774@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9775 #ifdef CONFIG_X86_64
9776 static inline bool user_64bit_mode(struct pt_regs *regs)
9777 {
9778+ unsigned long cs = regs->cs & 0xffff;
9779 #ifndef CONFIG_PARAVIRT
9780 /*
9781 * On non-paravirt systems, this is the only long mode CPL 3
9782 * selector. We do not allow long mode selectors in the LDT.
9783 */
9784- return regs->cs == __USER_CS;
9785+ return cs == __USER_CS;
9786 #else
9787 /* Headers are too twisted for this to go in paravirt.h. */
9788- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9789+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9790 #endif
9791 }
9792 #endif
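The ptrace.h hunks above swap the naming so that user_mode() always performs the full check (the CS privilege level OR'd with the EFLAGS VM bit, so VM86 frames are caught too) while user_mode_novm() is the cheaper variant for callers that have already ruled out VM86 mode. A standalone C sketch of the two checks, using the standard x86 constant values; this is illustrative only, not kernel code:

    #include <stdint.h>
    #include <stdbool.h>

    #define SEGMENT_RPL_MASK 0x3UL          /* requested privilege level bits of CS */
    #define USER_RPL         0x3UL          /* ring 3 */
    #define X86_VM_MASK      0x00020000UL   /* EFLAGS.VM: virtual-8086 mode */

    struct fake_regs { uint32_t cs; uint32_t flags; };   /* stand-in for struct pt_regs */

    /* Cheap check: valid only if the caller already excluded VM86 mode. */
    static bool user_mode_novm(const struct fake_regs *r)
    {
        return (r->cs & SEGMENT_RPL_MASK) == USER_RPL;
    }

    /* Full check: in VM86 mode CS may carry RPL 0, but EFLAGS.VM is set,
     * so OR-ing it in pushes the value to >= USER_RPL in one comparison. */
    static bool user_mode(const struct fake_regs *r)
    {
        return ((r->cs & SEGMENT_RPL_MASK) | (r->flags & X86_VM_MASK)) >= USER_RPL;
    }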
9793diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9794index 3250e3d..20db631 100644
9795--- a/arch/x86/include/asm/reboot.h
9796+++ b/arch/x86/include/asm/reboot.h
9797@@ -6,19 +6,19 @@
9798 struct pt_regs;
9799
9800 struct machine_ops {
9801- void (*restart)(char *cmd);
9802- void (*halt)(void);
9803- void (*power_off)(void);
9804+ void (* __noreturn restart)(char *cmd);
9805+ void (* __noreturn halt)(void);
9806+ void (* __noreturn power_off)(void);
9807 void (*shutdown)(void);
9808 void (*crash_shutdown)(struct pt_regs *);
9809- void (*emergency_restart)(void);
15a11c5b 9810-};
66a7e928 9811+ void (* __noreturn emergency_restart)(void);
15a11c5b 9812+} __no_const;
9813
9814 extern struct machine_ops machine_ops;
9815
9816 void native_machine_crash_shutdown(struct pt_regs *regs);
9817 void native_machine_shutdown(void);
9818-void machine_real_restart(unsigned int type);
9819+void machine_real_restart(unsigned int type) __noreturn;
9820 /* These must match dispatch_table in reboot_32.S */
9821 #define MRR_BIOS 0
9822 #define MRR_APM 1
9823diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9824index df4cd32..27ae072 100644
9825--- a/arch/x86/include/asm/rwsem.h
9826+++ b/arch/x86/include/asm/rwsem.h
9827@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9828 {
9829 asm volatile("# beginning down_read\n\t"
df50ba0c 9830 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9831+
9832+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 9833+ "jno 0f\n"
df50ba0c 9834+ LOCK_PREFIX _ASM_DEC "(%1)\n"
9835+ "int $4\n0:\n"
9836+ _ASM_EXTABLE(0b, 0b)
9837+#endif
9838+
6892158b 9839 /* adds 0x00000001 */
bc901d79 9840 " jns 1f\n"
58c5fc13 9841 " call call_rwsem_down_read_failed\n"
fe2de317 9842@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
bc901d79 9843 "1:\n\t"
9844 " mov %1,%2\n\t"
9845 " add %3,%2\n\t"
9846+
9847+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 9848+ "jno 0f\n"
df50ba0c 9849+ "sub %3,%2\n"
9850+ "int $4\n0:\n"
9851+ _ASM_EXTABLE(0b, 0b)
9852+#endif
9853+
bc901d79 9854 " jle 2f\n\t"
df50ba0c 9855 LOCK_PREFIX " cmpxchg %2,%0\n\t"
bc901d79 9856 " jnz 1b\n\t"
fe2de317 9857@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
66a7e928 9858 long tmp;
58c5fc13 9859 asm volatile("# beginning down_write\n\t"
df50ba0c 9860 LOCK_PREFIX " xadd %1,(%2)\n\t"
9861+
9862+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 9863+ "jno 0f\n"
df50ba0c 9864+ "mov %1,(%2)\n"
9865+ "int $4\n0:\n"
9866+ _ASM_EXTABLE(0b, 0b)
9867+#endif
9868+
6892158b 9869 /* adds 0xffff0001, returns the old value */
df50ba0c 9870 " test %1,%1\n\t"
58c5fc13 9871 /* was the count 0 before? */
fe2de317 9872@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
66a7e928 9873 long tmp;
58c5fc13 9874 asm volatile("# beginning __up_read\n\t"
df50ba0c 9875 LOCK_PREFIX " xadd %1,(%2)\n\t"
9876+
9877+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 9878+ "jno 0f\n"
df50ba0c 9879+ "mov %1,(%2)\n"
9880+ "int $4\n0:\n"
9881+ _ASM_EXTABLE(0b, 0b)
9882+#endif
9883+
9884 /* subtracts 1, returns the old value */
bc901d79 9885 " jns 1f\n\t"
6892158b 9886 " call call_rwsem_wake\n" /* expects old value in %edx */
fe2de317 9887@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
66a7e928 9888 long tmp;
58c5fc13 9889 asm volatile("# beginning __up_write\n\t"
df50ba0c 9890 LOCK_PREFIX " xadd %1,(%2)\n\t"
9891+
9892+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 9893+ "jno 0f\n"
df50ba0c 9894+ "mov %1,(%2)\n"
9895+ "int $4\n0:\n"
9896+ _ASM_EXTABLE(0b, 0b)
9897+#endif
9898+
6892158b 9899 /* subtracts 0xffff0001, returns the old value */
bc901d79 9900 " jns 1f\n\t"
6892158b 9901 " call call_rwsem_wake\n" /* expects old value in %edx */
fe2de317 9902@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9903 {
9904 asm volatile("# beginning __downgrade_write\n\t"
df50ba0c 9905 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9906+
9907+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 9908+ "jno 0f\n"
df50ba0c 9909+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9910+ "int $4\n0:\n"
9911+ _ASM_EXTABLE(0b, 0b)
9912+#endif
9913+
9914 /*
9915 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9916 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
fe2de317 9917@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9918 */
9919 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
58c5fc13 9920 {
9921- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9922+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9923+
9924+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 9925+ "jno 0f\n"
df50ba0c 9926+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
9927+ "int $4\n0:\n"
9928+ _ASM_EXTABLE(0b, 0b)
9929+#endif
9930+
9931 : "+m" (sem->count)
df50ba0c 9932 : "er" (delta));
58c5fc13 9933 }
fe2de317 9934@@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
58c5fc13 9935 {
66a7e928 9936 long tmp = delta;
9937
9938- asm volatile(LOCK_PREFIX "xadd %0,%1"
9939+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9940+
9941+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 9942+ "jno 0f\n"
df50ba0c 9943+ "mov %0,%1\n"
9944+ "int $4\n0:\n"
9945+ _ASM_EXTABLE(0b, 0b)
9946+#endif
9947+
9948 : "+r" (tmp), "+m" (sem->count)
9949 : : "memory");
9950
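All of the CONFIG_PAX_REFCOUNT hunks in this rwsem.h diff (and in the spinlock.h diff further down) follow one pattern: perform the LOCK-prefixed arithmetic, then "jno 0f" to skip the recovery path when the overflow flag is clear; otherwise undo the operation and raise "int $4", the x86 overflow exception, with an _ASM_EXTABLE entry so execution resumes at label 0. A minimal portable sketch of the same undo-on-overflow idea in plain C11, purely illustrative since the patch does this in inline asm on the CPU flags:

    #include <stdatomic.h>
    #include <limits.h>
    #include <stdio.h>

    /* Increment a counter but detect a wrap past INT_MAX, undo it and
     * report, instead of silently overflowing (the patch raises #OF here). */
    static int refcount_inc_checked(_Atomic int *count)
    {
        int old = atomic_fetch_add(count, 1);
        if (old == INT_MAX) {              /* the add just overflowed */
            atomic_fetch_sub(count, 1);    /* undo, like the mov/sub recovery in the asm */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        _Atomic int c = INT_MAX;
        printf("%d\n", refcount_inc_checked(&c));   /* prints -1: overflow caught */
        return 0;
    }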
9951diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
9952index 5e64171..f58957e 100644
9953--- a/arch/x86/include/asm/segment.h
9954+++ b/arch/x86/include/asm/segment.h
15a11c5b 9955@@ -64,10 +64,15 @@
9956 * 26 - ESPFIX small SS
9957 * 27 - per-cpu [ offset to per-cpu data area ]
9958 * 28 - stack_canary-20 [ for stack protector ]
9959- * 29 - unused
9960- * 30 - unused
9961+ * 29 - PCI BIOS CS
9962+ * 30 - PCI BIOS DS
9963 * 31 - TSS for double fault handler
9964 */
9965+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
9966+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
9967+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
9968+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
9969+
ae4e228f 9970 #define GDT_ENTRY_TLS_MIN 6
9971 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
9972
9973@@ -79,6 +84,8 @@
ae4e228f 9974
bc901d79 9975 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
9976
9977+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9978+
bc901d79 9979 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
58c5fc13 9980
bc901d79 9981 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15a11c5b 9982@@ -104,6 +111,12 @@
9983 #define __KERNEL_STACK_CANARY 0
9984 #endif
9985
bc901d79 9986+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
9987+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9988+
bc901d79 9989+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
9990+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9991+
9992 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9993
9994 /*
15a11c5b 9995@@ -141,7 +154,7 @@
9996 */
9997
9998 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9999-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10000+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10001
10002
10003 #else
15a11c5b 10004@@ -165,6 +178,8 @@
6e9df6a3 10005 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10006 #define __USER32_DS __USER_DS
10007
10008+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10009+
10010 #define GDT_ENTRY_TSS 8 /* needs two entries */
10011 #define GDT_ENTRY_LDT 10 /* needs two entries */
10012 #define GDT_ENTRY_TLS_MIN 12
15a11c5b 10013@@ -185,6 +200,7 @@
10014 #endif
10015
10016 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10017+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10018 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10019 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10020 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10021diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10022index 73b11bc..d4a3b63 100644
10023--- a/arch/x86/include/asm/smp.h
10024+++ b/arch/x86/include/asm/smp.h
10025@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10026 /* cpus sharing the last level cache: */
10027 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10028 DECLARE_PER_CPU(u16, cpu_llc_id);
10029-DECLARE_PER_CPU(int, cpu_number);
10030+DECLARE_PER_CPU(unsigned int, cpu_number);
10031
10032 static inline struct cpumask *cpu_sibling_mask(int cpu)
10033 {
10034@@ -77,7 +77,7 @@ struct smp_ops {
10035
10036 void (*send_call_func_ipi)(const struct cpumask *mask);
10037 void (*send_call_func_single_ipi)(int cpu);
10038-};
10039+} __no_const;
10040
10041 /* Globals due to paravirt */
10042 extern void set_cpu_sibling_map(int cpu);
fe2de317 10043@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10044 extern int safe_smp_processor_id(void);
10045
10046 #elif defined(CONFIG_X86_64_SMP)
10047-#define raw_smp_processor_id() (percpu_read(cpu_number))
10048-
10049-#define stack_smp_processor_id() \
10050-({ \
10051- struct thread_info *ti; \
10052- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10053- ti->cpu; \
10054-})
10055+#define raw_smp_processor_id() (percpu_read(cpu_number))
10056+#define stack_smp_processor_id() raw_smp_processor_id()
10057 #define safe_smp_processor_id() smp_processor_id()
10058
10059 #endif
10060diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10061index ee67edf..49c796b 100644
10062--- a/arch/x86/include/asm/spinlock.h
10063+++ b/arch/x86/include/asm/spinlock.h
10064@@ -248,6 +248,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
ae4e228f 10065 static inline void arch_read_lock(arch_rwlock_t *rw)
58c5fc13 10066 {
6e9df6a3 10067 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10068+
10069+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10070+ "jno 0f\n"
6e9df6a3 10071+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10072+ "int $4\n0:\n"
10073+ _ASM_EXTABLE(0b, 0b)
10074+#endif
10075+
10076 "jns 1f\n"
10077 "call __read_lock_failed\n\t"
10078 "1:\n"
fe2de317 10079@@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
ae4e228f 10080 static inline void arch_write_lock(arch_rwlock_t *rw)
58c5fc13 10081 {
6e9df6a3 10082 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10083+
10084+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10085+ "jno 0f\n"
6e9df6a3 10086+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10087+ "int $4\n0:\n"
10088+ _ASM_EXTABLE(0b, 0b)
10089+#endif
10090+
10091 "jz 1f\n"
10092 "call __write_lock_failed\n\t"
10093 "1:\n"
fe2de317 10094@@ -286,13 +302,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
58c5fc13 10095
ae4e228f 10096 static inline void arch_read_unlock(arch_rwlock_t *rw)
58c5fc13 10097 {
10098- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10099+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10100+
10101+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10102+ "jno 0f\n"
6e9df6a3 10103+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10104+ "int $4\n0:\n"
10105+ _ASM_EXTABLE(0b, 0b)
10106+#endif
10107+
6e9df6a3 10108 :"+m" (rw->lock) : : "memory");
10109 }
10110
ae4e228f 10111 static inline void arch_write_unlock(arch_rwlock_t *rw)
58c5fc13 10112 {
10113- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10114+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10115+
10116+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10117+ "jno 0f\n"
6e9df6a3 10118+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
58c5fc13 10119+ "int $4\n0:\n"
bc901d79 10120+ _ASM_EXTABLE(0b, 0b)
10121+#endif
10122+
6e9df6a3 10123 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10124 }
10125
10126diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10127index 1575177..cb23f52 100644
10128--- a/arch/x86/include/asm/stackprotector.h
10129+++ b/arch/x86/include/asm/stackprotector.h
10130@@ -48,7 +48,7 @@
10131 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10132 */
10133 #define GDT_STACK_CANARY_INIT \
10134- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10135+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10136
10137 /*
10138 * Initialize the stackprotector canary value.
fe2de317 10139@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10140
10141 static inline void load_stack_canary_segment(void)
10142 {
10143-#ifdef CONFIG_X86_32
10144+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10145 asm volatile ("mov %0, %%gs" : : "r" (0));
10146 #endif
10147 }
10148diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10149index 70bbe39..4ae2bd4 100644
10150--- a/arch/x86/include/asm/stacktrace.h
10151+++ b/arch/x86/include/asm/stacktrace.h
10152@@ -11,28 +11,20 @@
10153
10154 extern int kstack_depth_to_print;
10155
10156-struct thread_info;
10157+struct task_struct;
10158 struct stacktrace_ops;
10159
10160-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10161- unsigned long *stack,
10162- unsigned long bp,
10163- const struct stacktrace_ops *ops,
10164- void *data,
10165- unsigned long *end,
10166- int *graph);
10167+typedef unsigned long walk_stack_t(struct task_struct *task,
10168+ void *stack_start,
10169+ unsigned long *stack,
10170+ unsigned long bp,
10171+ const struct stacktrace_ops *ops,
10172+ void *data,
10173+ unsigned long *end,
10174+ int *graph);
10175
10176-extern unsigned long
10177-print_context_stack(struct thread_info *tinfo,
10178- unsigned long *stack, unsigned long bp,
10179- const struct stacktrace_ops *ops, void *data,
10180- unsigned long *end, int *graph);
10181-
10182-extern unsigned long
10183-print_context_stack_bp(struct thread_info *tinfo,
10184- unsigned long *stack, unsigned long bp,
10185- const struct stacktrace_ops *ops, void *data,
10186- unsigned long *end, int *graph);
10187+extern walk_stack_t print_context_stack;
10188+extern walk_stack_t print_context_stack_bp;
10189
10190 /* Generic stack tracer with callbacks */
10191
15a11c5b 10192@@ -40,7 +32,7 @@ struct stacktrace_ops {
10193 void (*address)(void *data, unsigned long address, int reliable);
10194 /* On negative return stop dumping */
10195 int (*stack)(void *data, char *name);
10196- walk_stack_t walk_stack;
10197+ walk_stack_t *walk_stack;
10198 };
10199
10200 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
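The stacktrace.h change turns walk_stack_t from a function-pointer typedef into a plain function typedef, so print_context_stack and print_context_stack_bp can be declared with it directly while struct stacktrace_ops spells out the pointer. A small sketch of that C idiom; the names here are illustrative, not the kernel's:

    /* Function typedef: names the *type* of a function, not a pointer to one. */
    typedef unsigned long walker_fn(unsigned long *stack, void *data);

    extern walker_fn walk_simple;      /* declares a function with that exact signature */

    struct walker_ops {
        walker_fn *walk;               /* the struct member is now explicitly a pointer */
    };

    unsigned long walk_simple(unsigned long *stack, void *data)
    {
        (void)data;
        return stack ? *stack : 0UL;
    }

    static struct walker_ops default_ops = { .walk = walk_simple };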
10201diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10202index cb23852..2dde194 100644
10203--- a/arch/x86/include/asm/sys_ia32.h
10204+++ b/arch/x86/include/asm/sys_ia32.h
10205@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10206 compat_sigset_t __user *, unsigned int);
10207 asmlinkage long sys32_alarm(unsigned int);
10208
10209-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10210+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10211 asmlinkage long sys32_sysfs(int, u32, u32);
10212
10213 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10214diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10215index c2ff2a1..4349184 100644
10216--- a/arch/x86/include/asm/system.h
10217+++ b/arch/x86/include/asm/system.h
66a7e928 10218@@ -129,7 +129,7 @@ do { \
10219 "call __switch_to\n\t" \
10220 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10221 __switch_canary \
10222- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10223+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10224 "movq %%rax,%%rdi\n\t" \
10225 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10226 "jnz ret_from_fork\n\t" \
66a7e928 10227@@ -140,7 +140,7 @@ do { \
10228 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10229 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10230 [_tif_fork] "i" (_TIF_FORK), \
10231- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10232+ [thread_info] "m" (current_tinfo), \
10233 [current_task] "m" (current_task) \
10234 __switch_canary_iparam \
10235 : "memory", "cc" __EXTRA_CLOBBER)
fe2de317 10236@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10237 {
10238 unsigned long __limit;
10239 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10240- return __limit + 1;
10241+ return __limit;
10242 }
10243
10244 static inline void native_clts(void)
15a11c5b 10245@@ -397,12 +397,12 @@ void enable_hlt(void);
10246
10247 void cpu_idle_wait(void);
10248
10249-extern unsigned long arch_align_stack(unsigned long sp);
10250+#define arch_align_stack(x) ((x) & ~0xfUL)
10251 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10252
10253 void default_idle(void);
10254
10255-void stop_this_cpu(void *dummy);
10256+void stop_this_cpu(void *dummy) __noreturn;
10257
10258 /*
10259 * Force strict CPU ordering.
10260diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10261index a1fe5c1..ee326d8 100644
10262--- a/arch/x86/include/asm/thread_info.h
10263+++ b/arch/x86/include/asm/thread_info.h
10264@@ -10,6 +10,7 @@
10265 #include <linux/compiler.h>
10266 #include <asm/page.h>
10267 #include <asm/types.h>
10268+#include <asm/percpu.h>
10269
10270 /*
10271 * low level task data that entry.S needs immediate access to
10272@@ -24,7 +25,6 @@ struct exec_domain;
6e9df6a3 10273 #include <linux/atomic.h>
10274
10275 struct thread_info {
10276- struct task_struct *task; /* main task structure */
10277 struct exec_domain *exec_domain; /* execution domain */
10278 __u32 flags; /* low level flags */
10279 __u32 status; /* thread synchronous flags */
66a7e928 10280@@ -34,18 +34,12 @@ struct thread_info {
10281 mm_segment_t addr_limit;
10282 struct restart_block restart_block;
10283 void __user *sysenter_return;
10284-#ifdef CONFIG_X86_32
10285- unsigned long previous_esp; /* ESP of the previous stack in
10286- case of nested (IRQ) stacks
10287- */
10288- __u8 supervisor_stack[0];
10289-#endif
66a7e928 10290+ unsigned long lowest_stack;
10291 int uaccess_err;
10292 };
10293
10294-#define INIT_THREAD_INFO(tsk) \
10295+#define INIT_THREAD_INFO \
10296 { \
10297- .task = &tsk, \
10298 .exec_domain = &default_exec_domain, \
10299 .flags = 0, \
10300 .cpu = 0, \
66a7e928 10301@@ -56,7 +50,7 @@ struct thread_info {
10302 }, \
10303 }
10304
10305-#define init_thread_info (init_thread_union.thread_info)
66a7e928 10306+#define init_thread_info (init_thread_union.stack)
10307 #define init_stack (init_thread_union.stack)
10308
10309 #else /* !__ASSEMBLY__ */
fe2de317 10310@@ -170,45 +164,40 @@ struct thread_info {
10311 ret; \
10312 })
71d190be 10313
10314-#ifdef CONFIG_X86_32
10315-
10316-#define STACK_WARN (THREAD_SIZE/8)
10317-/*
10318- * macros/functions for gaining access to the thread information structure
10319- *
10320- * preempt_count needs to be 1 initially, until the scheduler is functional.
10321- */
10322-#ifndef __ASSEMBLY__
10323-
10324-
10325-/* how to get the current stack pointer from C */
10326-register unsigned long current_stack_pointer asm("esp") __used;
71d190be 10327-
10328-/* how to get the thread information struct from C */
10329-static inline struct thread_info *current_thread_info(void)
10330-{
10331- return (struct thread_info *)
10332- (current_stack_pointer & ~(THREAD_SIZE - 1));
10333-}
10334-
10335-#else /* !__ASSEMBLY__ */
10336-
10337+#ifdef __ASSEMBLY__
10338 /* how to get the thread information struct from ASM */
10339 #define GET_THREAD_INFO(reg) \
71d190be
MT
10340- movl $-THREAD_SIZE, reg; \
10341- andl %esp, reg
10342+ mov PER_CPU_VAR(current_tinfo), reg
10343
10344 /* use this one if reg already contains %esp */
10345-#define GET_THREAD_INFO_WITH_ESP(reg) \
10346- andl $-THREAD_SIZE, reg
10347+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10348+#else
10349+/* how to get the thread information struct from C */
10350+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10351+
10352+static __always_inline struct thread_info *current_thread_info(void)
10353+{
10354+ return percpu_read_stable(current_tinfo);
10355+}
10356+#endif
10357+
10358+#ifdef CONFIG_X86_32
10359+
10360+#define STACK_WARN (THREAD_SIZE/8)
10361+/*
10362+ * macros/functions for gaining access to the thread information structure
10363+ *
10364+ * preempt_count needs to be 1 initially, until the scheduler is functional.
10365+ */
10366+#ifndef __ASSEMBLY__
10367+
10368+/* how to get the current stack pointer from C */
10369+register unsigned long current_stack_pointer asm("esp") __used;
10370
10371 #endif
10372
10373 #else /* X86_32 */
10374
10375-#include <asm/percpu.h>
10376-#define KERNEL_STACK_OFFSET (5*8)
10377-
10378 /*
10379 * macros/functions for gaining access to the thread information structure
10380 * preempt_count needs to be 1 initially, until the scheduler is functional.
fe2de317 10381@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10382 #ifndef __ASSEMBLY__
10383 DECLARE_PER_CPU(unsigned long, kernel_stack);
10384
10385-static inline struct thread_info *current_thread_info(void)
10386-{
10387- struct thread_info *ti;
10388- ti = (void *)(percpu_read_stable(kernel_stack) +
10389- KERNEL_STACK_OFFSET - THREAD_SIZE);
10390- return ti;
10391-}
10392-
10393-#else /* !__ASSEMBLY__ */
10394-
10395-/* how to get the thread information struct from ASM */
10396-#define GET_THREAD_INFO(reg) \
10397- movq PER_CPU_VAR(kernel_stack),reg ; \
10398- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10399-
10400+/* how to get the current stack pointer from C */
10401+register unsigned long current_stack_pointer asm("rsp") __used;
10402 #endif
10403
10404 #endif /* !X86_32 */
66a7e928 10405@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10406 extern void free_thread_info(struct thread_info *ti);
10407 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10408 #define arch_task_cache_init arch_task_cache_init
10409+
10410+#define __HAVE_THREAD_FUNCTIONS
10411+#define task_thread_info(task) (&(task)->tinfo)
10412+#define task_stack_page(task) ((task)->stack)
10413+#define setup_thread_stack(p, org) do {} while (0)
10414+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10415+
10416+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
66a7e928 10417+extern struct task_struct *alloc_task_struct_node(int node);
10418+extern void free_task_struct(struct task_struct *);
10419+
10420 #endif
10421 #endif /* _ASM_X86_THREAD_INFO_H */
10422diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10423index 36361bf..324f262 100644
10424--- a/arch/x86/include/asm/uaccess.h
10425+++ b/arch/x86/include/asm/uaccess.h
10426@@ -7,12 +7,15 @@
10427 #include <linux/compiler.h>
10428 #include <linux/thread_info.h>
10429 #include <linux/string.h>
10430+#include <linux/sched.h>
10431 #include <asm/asm.h>
10432 #include <asm/page.h>
10433
10434 #define VERIFY_READ 0
10435 #define VERIFY_WRITE 1
10436
10437+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10438+
10439 /*
10440 * The fs value determines whether argument validity checking should be
10441 * performed or not. If get_fs() == USER_DS, checking is performed, with
10442@@ -28,7 +31,12 @@
10443
10444 #define get_ds() (KERNEL_DS)
10445 #define get_fs() (current_thread_info()->addr_limit)
10446+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10447+void __set_fs(mm_segment_t x);
10448+void set_fs(mm_segment_t x);
10449+#else
10450 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10451+#endif
10452
10453 #define segment_eq(a, b) ((a).seg == (b).seg)
10454
10455@@ -76,7 +84,33 @@
10456 * checks that the pointer is in the user space range - after calling
10457 * this function, memory access functions may still return -EFAULT.
10458 */
10459-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10460+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10461+#define access_ok(type, addr, size) \
10462+({ \
10463+ long __size = size; \
10464+ unsigned long __addr = (unsigned long)addr; \
10465+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10466+ unsigned long __end_ao = __addr + __size - 1; \
10467+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10468+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10469+ while(__addr_ao <= __end_ao) { \
10470+ char __c_ao; \
10471+ __addr_ao += PAGE_SIZE; \
10472+ if (__size > PAGE_SIZE) \
10473+ cond_resched(); \
10474+ if (__get_user(__c_ao, (char __user *)__addr)) \
10475+ break; \
10476+ if (type != VERIFY_WRITE) { \
10477+ __addr = __addr_ao; \
10478+ continue; \
10479+ } \
10480+ if (__put_user(__c_ao, (char __user *)__addr)) \
10481+ break; \
10482+ __addr = __addr_ao; \
10483+ } \
10484+ } \
10485+ __ret_ao; \
10486+})
10487
10488 /*
10489 * The exception table consists of pairs of addresses: the first is the
10490@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10491 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10492 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10493
10494-
10495+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10496+#define __copyuser_seg "gs;"
10497+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10498+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10499+#else
10500+#define __copyuser_seg
10501+#define __COPYUSER_SET_ES
10502+#define __COPYUSER_RESTORE_ES
10503+#endif
10504
10505 #ifdef CONFIG_X86_32
10506 #define __put_user_asm_u64(x, addr, err, errret) \
10507- asm volatile("1: movl %%eax,0(%2)\n" \
10508- "2: movl %%edx,4(%2)\n" \
10509+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10510+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10511 "3:\n" \
10512 ".section .fixup,\"ax\"\n" \
10513 "4: movl %3,%0\n" \
10514@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10515 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10516
10517 #define __put_user_asm_ex_u64(x, addr) \
10518- asm volatile("1: movl %%eax,0(%1)\n" \
10519- "2: movl %%edx,4(%1)\n" \
10520+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10521+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10522 "3:\n" \
10523 _ASM_EXTABLE(1b, 2b - 1b) \
10524 _ASM_EXTABLE(2b, 3b - 2b) \
10525@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10526 __typeof__(*(ptr)) __pu_val; \
10527 __chk_user_ptr(ptr); \
10528 might_fault(); \
10529- __pu_val = x; \
10530+ __pu_val = (x); \
10531 switch (sizeof(*(ptr))) { \
10532 case 1: \
10533 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10534@@ -373,7 +415,7 @@ do { \
10535 } while (0)
10536
10537 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10538- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10539+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10540 "2:\n" \
10541 ".section .fixup,\"ax\"\n" \
10542 "3: mov %3,%0\n" \
10543@@ -381,7 +423,7 @@ do { \
10544 " jmp 2b\n" \
10545 ".previous\n" \
10546 _ASM_EXTABLE(1b, 3b) \
10547- : "=r" (err), ltype(x) \
10548+ : "=r" (err), ltype (x) \
10549 : "m" (__m(addr)), "i" (errret), "0" (err))
10550
10551 #define __get_user_size_ex(x, ptr, size) \
10552@@ -406,7 +448,7 @@ do { \
10553 } while (0)
10554
10555 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10556- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10557+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10558 "2:\n" \
10559 _ASM_EXTABLE(1b, 2b - 1b) \
10560 : ltype(x) : "m" (__m(addr)))
10561@@ -423,13 +465,24 @@ do { \
10562 int __gu_err; \
10563 unsigned long __gu_val; \
10564 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10565- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10566+ (x) = (__typeof__(*(ptr)))__gu_val; \
10567 __gu_err; \
10568 })
10569
10570 /* FIXME: this hack is definitely wrong -AK */
10571 struct __large_struct { unsigned long buf[100]; };
10572-#define __m(x) (*(struct __large_struct __user *)(x))
10573+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10574+#define ____m(x) \
10575+({ \
10576+ unsigned long ____x = (unsigned long)(x); \
10577+ if (____x < PAX_USER_SHADOW_BASE) \
10578+ ____x += PAX_USER_SHADOW_BASE; \
10579+ (void __user *)____x; \
10580+})
10581+#else
10582+#define ____m(x) (x)
10583+#endif
10584+#define __m(x) (*(struct __large_struct __user *)____m(x))
10585
10586 /*
10587 * Tell gcc we read from memory instead of writing: this is because
10588@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10589 * aliasing issues.
10590 */
10591 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10592- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10593+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10594 "2:\n" \
10595 ".section .fixup,\"ax\"\n" \
10596 "3: mov %3,%0\n" \
10597@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10598 ".previous\n" \
10599 _ASM_EXTABLE(1b, 3b) \
10600 : "=r"(err) \
10601- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10602+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10603
10604 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10605- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10606+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10607 "2:\n" \
10608 _ASM_EXTABLE(1b, 2b - 1b) \
10609 : : ltype(x), "m" (__m(addr)))
10610@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10611 * On error, the variable @x is set to zero.
10612 */
10613
10614+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10615+#define __get_user(x, ptr) get_user((x), (ptr))
10616+#else
10617 #define __get_user(x, ptr) \
10618 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10619+#endif
10620
10621 /**
10622 * __put_user: - Write a simple value into user space, with less checking.
10623@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10624 * Returns zero on success, or -EFAULT on error.
10625 */
10626
10627+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10628+#define __put_user(x, ptr) put_user((x), (ptr))
10629+#else
10630 #define __put_user(x, ptr) \
10631 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10632+#endif
10633
10634 #define __get_user_unaligned __get_user
10635 #define __put_user_unaligned __put_user
10636@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10637 #define get_user_ex(x, ptr) do { \
10638 unsigned long __gue_val; \
10639 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10640- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10641+ (x) = (__typeof__(*(ptr)))__gue_val; \
10642 } while (0)
10643
10644 #ifdef CONFIG_X86_WP_WORKS_OK
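The widened access_ok() above does more than the old range check: when a range spans several pages it touches one byte per page with __get_user() (and writes it back with __put_user() for VERIFY_WRITE) so that an unmapped hole faults up front rather than halfway through a copy. A user-space sketch of that page-probing loop; probe_byte() is a stand-in assumption for __get_user(), not a kernel interface:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Hypothetical helper: reads one byte at addr, returns false on fault. */
    extern bool probe_byte(uintptr_t addr);

    static bool range_probed_ok(uintptr_t addr, size_t size)
    {
        uintptr_t end = addr + size - 1;

        /* Visit every page that [addr, addr+size) overlaps and touch one
         * byte in it, starting with the byte at addr itself. */
        for (uintptr_t page = addr & PAGE_MASK; page <= end; page += PAGE_SIZE) {
            uintptr_t probe = page < addr ? addr : page;
            if (!probe_byte(probe))
                return false;
        }
        return true;
    }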
10645diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10646index 566e803..89f1e60 100644
10647--- a/arch/x86/include/asm/uaccess_32.h
10648+++ b/arch/x86/include/asm/uaccess_32.h
10649@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10650 static __always_inline unsigned long __must_check
10651 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10652 {
10653+ pax_track_stack();
10654+
10655+ if ((long)n < 0)
10656+ return n;
10657+
10658 if (__builtin_constant_p(n)) {
10659 unsigned long ret;
10660
fe2de317 10661@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10662 return ret;
10663 }
10664 }
10665+ if (!__builtin_constant_p(n))
10666+ check_object_size(from, n, true);
10667 return __copy_to_user_ll(to, from, n);
10668 }
10669
fe2de317 10670@@ -82,12 +89,16 @@ static __always_inline unsigned long __must_check
10671 __copy_to_user(void __user *to, const void *from, unsigned long n)
10672 {
10673 might_fault();
10674+
10675 return __copy_to_user_inatomic(to, from, n);
10676 }
10677
10678 static __always_inline unsigned long
10679 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10680 {
10681+ if ((long)n < 0)
10682+ return n;
10683+
10684 /* Avoid zeroing the tail if the copy fails..
10685 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10686 * but as the zeroing behaviour is only significant when n is not
15a11c5b 10687@@ -137,6 +148,12 @@ static __always_inline unsigned long
10688 __copy_from_user(void *to, const void __user *from, unsigned long n)
10689 {
10690 might_fault();
10691+
10692+ pax_track_stack();
10693+
10694+ if ((long)n < 0)
10695+ return n;
10696+
10697 if (__builtin_constant_p(n)) {
10698 unsigned long ret;
10699
fe2de317 10700@@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10701 return ret;
10702 }
10703 }
10704+ if (!__builtin_constant_p(n))
10705+ check_object_size(to, n, false);
10706 return __copy_from_user_ll(to, from, n);
10707 }
10708
fe2de317 10709@@ -159,6 +178,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10710 const void __user *from, unsigned long n)
10711 {
10712 might_fault();
10713+
10714+ if ((long)n < 0)
10715+ return n;
10716+
10717 if (__builtin_constant_p(n)) {
10718 unsigned long ret;
10719
15a11c5b 10720@@ -181,15 +204,19 @@ static __always_inline unsigned long
58c5fc13
MT
10721 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10722 unsigned long n)
10723 {
10724- return __copy_from_user_ll_nocache_nozero(to, from, n);
10725+ if ((long)n < 0)
10726+ return n;
10727+
10728+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10729 }
10730
10731-unsigned long __must_check copy_to_user(void __user *to,
10732- const void *from, unsigned long n);
10733-unsigned long __must_check _copy_from_user(void *to,
10734- const void __user *from,
10735- unsigned long n);
fe2de317 10736-
10737+extern void copy_to_user_overflow(void)
10738+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10739+ __compiletime_error("copy_to_user() buffer size is not provably correct")
10740+#else
10741+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
10742+#endif
10743+;
10744
10745 extern void copy_from_user_overflow(void)
10746 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
fe2de317 10747@@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void)
10748 #endif
10749 ;
10750
10751-static inline unsigned long __must_check copy_from_user(void *to,
10752- const void __user *from,
10753- unsigned long n)
10754+/**
10755+ * copy_to_user: - Copy a block of data into user space.
10756+ * @to: Destination address, in user space.
10757+ * @from: Source address, in kernel space.
10758+ * @n: Number of bytes to copy.
10759+ *
10760+ * Context: User context only. This function may sleep.
10761+ *
10762+ * Copy data from kernel space to user space.
10763+ *
10764+ * Returns number of bytes that could not be copied.
10765+ * On success, this will be zero.
10766+ */
ae4e228f 10767+static inline unsigned long __must_check
10768+copy_to_user(void __user *to, const void *from, unsigned long n)
10769+{
10770+ int sz = __compiletime_object_size(from);
10771+
10772+ if (unlikely(sz != -1 && sz < n))
10773+ copy_to_user_overflow();
10774+ else if (access_ok(VERIFY_WRITE, to, n))
10775+ n = __copy_to_user(to, from, n);
10776+ return n;
10777+}
10778+
10779+/**
10780+ * copy_from_user: - Copy a block of data from user space.
10781+ * @to: Destination address, in kernel space.
10782+ * @from: Source address, in user space.
10783+ * @n: Number of bytes to copy.
10784+ *
10785+ * Context: User context only. This function may sleep.
10786+ *
10787+ * Copy data from user space to kernel space.
10788+ *
10789+ * Returns number of bytes that could not be copied.
10790+ * On success, this will be zero.
10791+ *
10792+ * If some data could not be copied, this function will pad the copied
10793+ * data to the requested size using zero bytes.
10794+ */
ae4e228f 10795+static inline unsigned long __must_check
58c5fc13 10796+copy_from_user(void *to, const void __user *from, unsigned long n)
10797 {
10798 int sz = __compiletime_object_size(to);
10799
10800- if (likely(sz == -1 || sz >= n))
10801- n = _copy_from_user(to, from, n);
10802- else
10803+ if (unlikely(sz != -1 && sz < n))
10804 copy_from_user_overflow();
10805-
10806+ else if (access_ok(VERIFY_READ, from, n))
10807+ n = __copy_from_user(to, from, n);
10808+ else if ((long)n > 0) {
10809+ if (!__builtin_constant_p(n))
10810+ check_object_size(to, n, false);
10811+ memset(to, 0, n);
10812+ }
ae4e228f 10813 return n;
10814 }
10815
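Both rewritten copy helpers above lean on __compiletime_object_size(), which wraps GCC's __builtin_object_size(): when the destination (or source) is an object whose size the compiler can see, a copy that is provably too large triggers the copy_*_user_overflow() diagnostic instead of silently clobbering memory. A tiny user-space sketch of the underlying builtin; the result is (size_t)-1 whenever the size is not known, for example without optimisation, so the check degrades to "allow":

    #include <stdio.h>
    #include <stddef.h>

    #define object_size(p) __builtin_object_size((p), 0)

    /* Returns 1 if copying n bytes into an object of size dst_sz is allowed. */
    static int copy_allowed(size_t dst_sz, size_t n)
    {
        return dst_sz == (size_t)-1 || dst_sz >= n;
    }

    int main(void)
    {
        char small[8];
        printf("%d\n", copy_allowed(object_size(small), sizeof(small))); /* 1 */
        printf("%d\n", copy_allowed(object_size(small), 32));            /* 0 when the size is known */
        return 0;
    }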
10816diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10817index 1c66d30..59bd7d4 100644
10818--- a/arch/x86/include/asm/uaccess_64.h
10819+++ b/arch/x86/include/asm/uaccess_64.h
15a11c5b 10820@@ -10,6 +10,9 @@
10821 #include <asm/alternative.h>
10822 #include <asm/cpufeature.h>
58c5fc13 10823 #include <asm/page.h>
10824+#include <asm/pgtable.h>
10825+
58c5fc13 10826+#define set_fs(x) (current_thread_info()->addr_limit = (x))
df50ba0c 10827
10828 /*
10829 * Copy To/From Userspace
10830@@ -17,12 +20,12 @@
10831
10832 /* Handles exceptions in both to and from, but doesn't do access_ok */
10833 __must_check unsigned long
10834-copy_user_generic_string(void *to, const void *from, unsigned len);
10835+copy_user_generic_string(void *to, const void *from, unsigned long len);
10836 __must_check unsigned long
10837-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10838+copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10839
10840 static __always_inline __must_check unsigned long
10841-copy_user_generic(void *to, const void *from, unsigned len)
10842+copy_user_generic(void *to, const void *from, unsigned long len)
10843 {
10844 unsigned ret;
10845
10846@@ -36,138 +39,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
10847 return ret;
10848 }
10849
ae4e228f 10850+static __always_inline __must_check unsigned long
fe2de317 10851+__copy_to_user(void __user *to, const void *from, unsigned long len);
ae4e228f 10852+static __always_inline __must_check unsigned long
fe2de317 10853+__copy_from_user(void *to, const void __user *from, unsigned long len);
ae4e228f 10854 __must_check unsigned long
10855-_copy_to_user(void __user *to, const void *from, unsigned len);
10856-__must_check unsigned long
10857-_copy_from_user(void *to, const void __user *from, unsigned len);
10858-__must_check unsigned long
10859-copy_in_user(void __user *to, const void __user *from, unsigned len);
10860+copy_in_user(void __user *to, const void __user *from, unsigned long len);
58c5fc13 10861
10862 static inline unsigned long __must_check copy_from_user(void *to,
10863 const void __user *from,
10864- unsigned long n)
10865+ unsigned n)
10866 {
10867- int sz = __compiletime_object_size(to);
10868-
10869 might_fault();
10870- if (likely(sz == -1 || sz >= n))
10871- n = _copy_from_user(to, from, n);
10872-#ifdef CONFIG_DEBUG_VM
10873- else
10874- WARN(1, "Buffer overflow detected!\n");
10875-#endif
10876+
10877+ if (access_ok(VERIFY_READ, from, n))
10878+ n = __copy_from_user(to, from, n);
fe2de317 10879+ else if (n < INT_MAX) {
10880+ if (!__builtin_constant_p(n))
10881+ check_object_size(to, n, false);
10882+ memset(to, 0, n);
10883+ }
10884 return n;
10885 }
10886
10887 static __always_inline __must_check
10888-int copy_to_user(void __user *dst, const void *src, unsigned size)
10889+int copy_to_user(void __user *dst, const void *src, unsigned long size)
10890 {
10891 might_fault();
10892
10893- return _copy_to_user(dst, src, size);
10894+ if (access_ok(VERIFY_WRITE, dst, size))
10895+ size = __copy_to_user(dst, src, size);
10896+ return size;
10897 }
10898
10899 static __always_inline __must_check
10900-int __copy_from_user(void *dst, const void __user *src, unsigned size)
fe2de317 10901+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10902 {
10903- int ret = 0;
ae4e228f 10904+ int sz = __compiletime_object_size(dst);
58c5fc13
MT
10905+ unsigned ret = 0;
10906
10907 might_fault();
10908- if (!__builtin_constant_p(size))
bc901d79 10909- return copy_user_generic(dst, (__force void *)src, size);
58c5fc13 10910+
10911+ pax_track_stack();
10912+
fe2de317 10913+ if (size > INT_MAX)
10914+ return size;
10915+
10916+#ifdef CONFIG_PAX_MEMORY_UDEREF
10917+ if (!__access_ok(VERIFY_READ, src, size))
10918+ return size;
10919+#endif
10920+
10921+ if (unlikely(sz != -1 && sz < size)) {
10922+#ifdef CONFIG_DEBUG_VM
10923+ WARN(1, "Buffer overflow detected!\n");
10924+#endif
10925+ return size;
10926+ }
10927+
10928+ if (!__builtin_constant_p(size)) {
10929+ check_object_size(dst, size, false);
10930+
10931+#ifdef CONFIG_PAX_MEMORY_UDEREF
10932+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10933+ src += PAX_USER_SHADOW_BASE;
10934+#endif
10935+
6e9df6a3 10936+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
10937+ }
10938 switch (size) {
10939- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10940+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
58c5fc13 10941 ret, "b", "b", "=q", 1);
10942 return ret;
10943- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10944+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10945 ret, "w", "w", "=r", 2);
10946 return ret;
10947- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10948+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10949 ret, "l", "k", "=r", 4);
10950 return ret;
10951- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10952+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10953 ret, "q", "", "=r", 8);
10954 return ret;
10955 case 10:
10956- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10957+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10958 ret, "q", "", "=r", 10);
10959 if (unlikely(ret))
10960 return ret;
10961 __get_user_asm(*(u16 *)(8 + (char *)dst),
10962- (u16 __user *)(8 + (char __user *)src),
10963+ (const u16 __user *)(8 + (const char __user *)src),
10964 ret, "w", "w", "=r", 2);
10965 return ret;
10966 case 16:
10967- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10968+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10969 ret, "q", "", "=r", 16);
10970 if (unlikely(ret))
10971 return ret;
10972 __get_user_asm(*(u64 *)(8 + (char *)dst),
10973- (u64 __user *)(8 + (char __user *)src),
10974+ (const u64 __user *)(8 + (const char __user *)src),
10975 ret, "q", "", "=r", 8);
10976 return ret;
10977 default:
bc901d79 10978- return copy_user_generic(dst, (__force void *)src, size);
10979+
10980+#ifdef CONFIG_PAX_MEMORY_UDEREF
10981+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10982+ src += PAX_USER_SHADOW_BASE;
10983+#endif
10984+
6e9df6a3 10985+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
df50ba0c 10986 }
10987 }
10988
10989 static __always_inline __must_check
10990-int __copy_to_user(void __user *dst, const void *src, unsigned size)
fe2de317 10991+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
10992 {
10993- int ret = 0;
ae4e228f 10994+ int sz = __compiletime_object_size(src);
10995+ unsigned ret = 0;
10996
10997 might_fault();
10998- if (!__builtin_constant_p(size))
6e9df6a3 10999- return copy_user_generic((__force void *)dst, src, size);
58c5fc13 11000+
11001+ pax_track_stack();
11002+
fe2de317 11003+ if (size > INT_MAX)
11004+ return size;
11005+
11006+#ifdef CONFIG_PAX_MEMORY_UDEREF
11007+ if (!__access_ok(VERIFY_WRITE, dst, size))
11008+ return size;
11009+#endif
11010+
11011+ if (unlikely(sz != -1 && sz < size)) {
11012+#ifdef CONFIG_DEBUG_VM
11013+ WARN(1, "Buffer overflow detected!\n");
11014+#endif
11015+ return size;
11016+ }
11017+
11018+ if (!__builtin_constant_p(size)) {
11019+ check_object_size(src, size, true);
11020+
11021+#ifdef CONFIG_PAX_MEMORY_UDEREF
11022+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11023+ dst += PAX_USER_SHADOW_BASE;
11024+#endif
11025+
6e9df6a3 11026+ return copy_user_generic((__force_kernel void *)dst, src, size);
11027+ }
11028 switch (size) {
11029- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11030+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
58c5fc13 11031 ret, "b", "b", "iq", 1);
11032 return ret;
11033- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11034+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11035 ret, "w", "w", "ir", 2);
11036 return ret;
11037- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11038+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11039 ret, "l", "k", "ir", 4);
11040 return ret;
11041- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11042+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11043 ret, "q", "", "er", 8);
11044 return ret;
11045 case 10:
11046- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11047+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11048 ret, "q", "", "er", 10);
11049 if (unlikely(ret))
11050 return ret;
11051 asm("":::"memory");
11052- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11053+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11054 ret, "w", "w", "ir", 2);
11055 return ret;
11056 case 16:
11057- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11058+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11059 ret, "q", "", "er", 16);
11060 if (unlikely(ret))
11061 return ret;
11062 asm("":::"memory");
11063- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11064+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11065 ret, "q", "", "er", 8);
11066 return ret;
11067 default:
6e9df6a3 11068- return copy_user_generic((__force void *)dst, src, size);
11069+
11070+#ifdef CONFIG_PAX_MEMORY_UDEREF
11071+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11072+ dst += PAX_USER_SHADOW_BASE;
11073+#endif
11074+
6e9df6a3 11075+ return copy_user_generic((__force_kernel void *)dst, src, size);
df50ba0c 11076 }
11077 }
11078
11079 static __always_inline __must_check
11080-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
fe2de317 11081+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11082 {
11083- int ret = 0;
11084+ unsigned ret = 0;
11085
11086 might_fault();
df50ba0c 11087- if (!__builtin_constant_p(size))
11088- return copy_user_generic((__force void *)dst,
11089- (__force void *)src, size);
58c5fc13 11090+
fe2de317 11091+ if (size > INT_MAX)
11092+ return size;
11093+
11094+#ifdef CONFIG_PAX_MEMORY_UDEREF
11095+ if (!__access_ok(VERIFY_READ, src, size))
11096+ return size;
11097+ if (!__access_ok(VERIFY_WRITE, dst, size))
11098+ return size;
11099+#endif
11100+
df50ba0c 11101+ if (!__builtin_constant_p(size)) {
11102+
11103+#ifdef CONFIG_PAX_MEMORY_UDEREF
11104+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11105+ src += PAX_USER_SHADOW_BASE;
11106+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11107+ dst += PAX_USER_SHADOW_BASE;
11108+#endif
11109+
11110+ return copy_user_generic((__force_kernel void *)dst,
11111+ (__force_kernel const void *)src, size);
11112+ }
11113 switch (size) {
11114 case 1: {
11115 u8 tmp;
11116- __get_user_asm(tmp, (u8 __user *)src,
11117+ __get_user_asm(tmp, (const u8 __user *)src,
11118 ret, "b", "b", "=q", 1);
11119 if (likely(!ret))
11120 __put_user_asm(tmp, (u8 __user *)dst,
fe2de317 11121@@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11122 }
11123 case 2: {
11124 u16 tmp;
11125- __get_user_asm(tmp, (u16 __user *)src,
11126+ __get_user_asm(tmp, (const u16 __user *)src,
11127 ret, "w", "w", "=r", 2);
11128 if (likely(!ret))
11129 __put_user_asm(tmp, (u16 __user *)dst,
fe2de317 11130@@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11131
11132 case 4: {
11133 u32 tmp;
11134- __get_user_asm(tmp, (u32 __user *)src,
11135+ __get_user_asm(tmp, (const u32 __user *)src,
11136 ret, "l", "k", "=r", 4);
11137 if (likely(!ret))
11138 __put_user_asm(tmp, (u32 __user *)dst,
fe2de317 11139@@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11140 }
11141 case 8: {
11142 u64 tmp;
11143- __get_user_asm(tmp, (u64 __user *)src,
11144+ __get_user_asm(tmp, (const u64 __user *)src,
11145 ret, "q", "", "=r", 8);
11146 if (likely(!ret))
11147 __put_user_asm(tmp, (u64 __user *)dst,
fe2de317 11148@@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11149 return ret;
11150 }
11151 default:
11152- return copy_user_generic((__force void *)dst,
11153- (__force void *)src, size);
11154+
11155+#ifdef CONFIG_PAX_MEMORY_UDEREF
11156+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11157+ src += PAX_USER_SHADOW_BASE;
11158+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11159+ dst += PAX_USER_SHADOW_BASE;
11160+#endif
11161+
11162+ return copy_user_generic((__force_kernel void *)dst,
11163+ (__force_kernel const void *)src, size);
df50ba0c 11164 }
11165 }
11166
11167@@ -219,35 +318,74 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11168 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11169
df50ba0c 11170 static __must_check __always_inline int
fe2de317
MT
11171-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11172+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
df50ba0c 11173 {
6e9df6a3 11174- return copy_user_generic(dst, (__force const void *)src, size);
66a7e928
MT
11175+ pax_track_stack();
11176+
fe2de317 11177+ if (size > INT_MAX)
bc901d79
MT
11178+ return size;
11179+
11180+#ifdef CONFIG_PAX_MEMORY_UDEREF
11181+ if (!__access_ok(VERIFY_READ, src, size))
11182+ return size;
bc901d79 11183+
df50ba0c
MT
11184+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11185+ src += PAX_USER_SHADOW_BASE;
8308f9c9
MT
11186+#endif
11187+
6e9df6a3 11188+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
ae4e228f 11189 }
58c5fc13
MT
11190
11191-static __must_check __always_inline int
fe2de317 11192-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
58c5fc13 11193+static __must_check __always_inline unsigned long
fe2de317 11194+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
58c5fc13 11195 {
6e9df6a3 11196- return copy_user_generic((__force void *)dst, src, size);
fe2de317 11197+ if (size > INT_MAX)
58c5fc13
MT
11198+ return size;
11199+
bc901d79
MT
11200+#ifdef CONFIG_PAX_MEMORY_UDEREF
11201+ if (!__access_ok(VERIFY_WRITE, dst, size))
11202+ return size;
bc901d79 11203+
df50ba0c
MT
11204+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11205+ dst += PAX_USER_SHADOW_BASE;
8308f9c9
MT
11206+#endif
11207+
6e9df6a3 11208+ return copy_user_generic((__force_kernel void *)dst, src, size);
58c5fc13
MT
11209 }
11210
11211-extern long __copy_user_nocache(void *dst, const void __user *src,
fe2de317 11212- unsigned size, int zerorest);
58c5fc13 11213+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
fe2de317 11214+ unsigned long size, int zerorest);
58c5fc13
MT
11215
11216-static inline int
11217-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
fe2de317 11218+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
58c5fc13
MT
11219 {
11220 might_sleep();
11221+
fe2de317 11222+ if (size > INT_MAX)
58c5fc13 11223+ return size;
bc901d79
MT
11224+
11225+#ifdef CONFIG_PAX_MEMORY_UDEREF
11226+ if (!__access_ok(VERIFY_READ, src, size))
11227+ return size;
11228+#endif
58c5fc13
MT
11229+
11230 return __copy_user_nocache(dst, src, size, 1);
11231 }
11232
11233-static inline int
11234-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
fe2de317 11235- unsigned size)
58c5fc13 11236+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
fe2de317 11237+ unsigned long size)
58c5fc13 11238 {
fe2de317 11239+ if (size > INT_MAX)
58c5fc13 11240+ return size;
bc901d79
MT
11241+
11242+#ifdef CONFIG_PAX_MEMORY_UDEREF
11243+ if (!__access_ok(VERIFY_READ, src, size))
11244+ return size;
11245+#endif
58c5fc13
MT
11246+
11247 return __copy_user_nocache(dst, src, size, 0);
11248 }
11249
11250-unsigned long
6e9df6a3 11251-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
58c5fc13 11252+extern unsigned long
fe2de317 11253+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
58c5fc13
MT
11254
11255 #endif /* _ASM_X86_UACCESS_64_H */
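Note on the uaccess_64.h hunks above: every copy helper now follows the same pattern. It rejects sizes above INT_MAX (returning the requested size signals that nothing was copied), validates the userland pointer with __access_ok() under CONFIG_PAX_MEMORY_UDEREF, and rebases any pointer below PAX_USER_SHADOW_BASE into the UDEREF shadow mapping before handing it to copy_user_generic(). A minimal sketch of the rebasing idiom, with pax_rebase_user() as a hypothetical helper (the patch open-codes the adjustment inside each inline function):

	static inline void __user *pax_rebase_user(void __user *ptr)
	{
		/* under UDEREF on x86-64, userland is reachable from kernel
		 * mode through an alias at PAX_USER_SHADOW_BASE + address */
		if ((unsigned long)ptr < PAX_USER_SHADOW_BASE)
			ptr += PAX_USER_SHADOW_BASE;
		return ptr;
	}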
fe2de317
MT
11256diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11257index bb05228..d763d5b 100644
11258--- a/arch/x86/include/asm/vdso.h
11259+++ b/arch/x86/include/asm/vdso.h
6e9df6a3
MT
11260@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11261 #define VDSO32_SYMBOL(base, name) \
11262 ({ \
11263 extern const char VDSO32_##name[]; \
11264- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11265+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11266 })
11267 #endif
11268
fe2de317
MT
11269diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11270index d3d8590..d296b5f 100644
11271--- a/arch/x86/include/asm/x86_init.h
11272+++ b/arch/x86/include/asm/x86_init.h
15a11c5b
MT
11273@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11274 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11275 void (*find_smp_config)(void);
11276 void (*get_smp_config)(unsigned int early);
11277-};
11278+} __no_const;
57199397 11279
15a11c5b
MT
11280 /**
11281 * struct x86_init_resources - platform specific resource related ops
11282@@ -42,7 +42,7 @@ struct x86_init_resources {
11283 void (*probe_roms)(void);
11284 void (*reserve_resources)(void);
11285 char *(*memory_setup)(void);
11286-};
11287+} __no_const;
58c5fc13 11288
15a11c5b
MT
11289 /**
11290 * struct x86_init_irqs - platform specific interrupt setup
11291@@ -55,7 +55,7 @@ struct x86_init_irqs {
11292 void (*pre_vector_init)(void);
11293 void (*intr_init)(void);
11294 void (*trap_init)(void);
11295-};
11296+} __no_const;
58c5fc13 11297
15a11c5b
MT
11298 /**
11299 * struct x86_init_oem - oem platform specific customizing functions
11300@@ -65,7 +65,7 @@ struct x86_init_irqs {
11301 struct x86_init_oem {
11302 void (*arch_setup)(void);
11303 void (*banner)(void);
11304-};
11305+} __no_const;
58c5fc13 11306
15a11c5b
MT
11307 /**
11308 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11309@@ -76,7 +76,7 @@ struct x86_init_oem {
11310 */
11311 struct x86_init_mapping {
11312 void (*pagetable_reserve)(u64 start, u64 end);
11313-};
11314+} __no_const;
58c5fc13 11315
15a11c5b
MT
11316 /**
11317 * struct x86_init_paging - platform specific paging functions
11318@@ -86,7 +86,7 @@ struct x86_init_mapping {
11319 struct x86_init_paging {
11320 void (*pagetable_setup_start)(pgd_t *base);
11321 void (*pagetable_setup_done)(pgd_t *base);
11322-};
11323+} __no_const;
58c5fc13 11324
15a11c5b
MT
11325 /**
11326 * struct x86_init_timers - platform specific timer setup
11327@@ -101,7 +101,7 @@ struct x86_init_timers {
11328 void (*tsc_pre_init)(void);
11329 void (*timer_init)(void);
11330 void (*wallclock_init)(void);
11331-};
11332+} __no_const;
58c5fc13 11333
15a11c5b
MT
11334 /**
11335 * struct x86_init_iommu - platform specific iommu setup
11336@@ -109,7 +109,7 @@ struct x86_init_timers {
11337 */
11338 struct x86_init_iommu {
11339 int (*iommu_init)(void);
11340-};
11341+} __no_const;
58c5fc13 11342
15a11c5b
MT
11343 /**
11344 * struct x86_init_pci - platform specific pci init functions
11345@@ -123,7 +123,7 @@ struct x86_init_pci {
11346 int (*init)(void);
11347 void (*init_irq)(void);
11348 void (*fixup_irqs)(void);
11349-};
11350+} __no_const;
58c5fc13 11351
15a11c5b
MT
11352 /**
11353 * struct x86_init_ops - functions for platform specific setup
11354@@ -139,7 +139,7 @@ struct x86_init_ops {
11355 struct x86_init_timers timers;
11356 struct x86_init_iommu iommu;
11357 struct x86_init_pci pci;
11358-};
11359+} __no_const;
66a7e928 11360
15a11c5b
MT
11361 /**
11362 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11363@@ -147,7 +147,7 @@ struct x86_init_ops {
11364 */
11365 struct x86_cpuinit_ops {
11366 void (*setup_percpu_clockev)(void);
11367-};
11368+} __no_const;
66a7e928 11369
15a11c5b
MT
11370 /**
11371 * struct x86_platform_ops - platform specific runtime functions
11372@@ -166,7 +166,7 @@ struct x86_platform_ops {
11373 bool (*is_untracked_pat_range)(u64 start, u64 end);
11374 void (*nmi_init)(void);
11375 int (*i8042_detect)(void);
11376-};
11377+} __no_const;
11378
11379 struct pci_dev;
11380
11381@@ -174,7 +174,7 @@ struct x86_msi_ops {
11382 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11383 void (*teardown_msi_irq)(unsigned int irq);
11384 void (*teardown_msi_irqs)(struct pci_dev *dev);
11385-};
11386+} __no_const;
11387
11388 extern struct x86_init_ops x86_init;
11389 extern struct x86_cpuinit_ops x86_cpuinit;
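Note on the x86_init.h hunk: each ops structure gains __no_const. The grsecurity constify GCC plugin makes structures that consist of function pointers const by default; __no_const opts these definitions out because platform setup code legitimately rewrites their members at run time. A hedged sketch of the distinction (struct and field names are illustrative only):

	struct fixed_ops {			/* constified by the plugin: never changes after build */
		void (*handler)(void);
	};

	struct boot_time_ops {
		void (*probe)(void);
	} __no_const;				/* platform code reassigns .probe during boot */

Where a constified object really must be written, the patch instead brackets the write with pax_open_kernel()/pax_close_kernel(), as in the mce_chrdev_ops and machine_check_vector hunks further below.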
fe2de317
MT
11390diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11391index c6ce245..ffbdab7 100644
11392--- a/arch/x86/include/asm/xsave.h
11393+++ b/arch/x86/include/asm/xsave.h
11394@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
df50ba0c
MT
11395 {
11396 int err;
6892158b 11397
df50ba0c
MT
11398+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11399+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11400+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11401+#endif
11402+
6892158b
MT
11403 /*
11404 * Clear the xsave header first, so that reserved fields are
11405 * initialized to zero.
fe2de317 11406@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
6e9df6a3
MT
11407 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11408 {
11409 int err;
11410- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11411+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
df50ba0c
MT
11412 u32 lmask = mask;
11413 u32 hmask = mask >> 32;
11414
11415+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11416+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11417+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11418+#endif
11419+
11420 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11421 "2:\n"
11422 ".section .fixup,\"ax\"\n"
fe2de317
MT
11423diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11424index 6a564ac..9b1340c 100644
11425--- a/arch/x86/kernel/acpi/realmode/Makefile
11426+++ b/arch/x86/kernel/acpi/realmode/Makefile
11427@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
15a11c5b
MT
11428 $(call cc-option, -fno-stack-protector) \
11429 $(call cc-option, -mpreferred-stack-boundary=2)
11430 KBUILD_CFLAGS += $(call cc-option, -m32)
11431+ifdef CONSTIFY_PLUGIN
11432+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11433+endif
11434 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11435 GCOV_PROFILE := n
11436
fe2de317
MT
11437diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11438index b4fd836..4358fe3 100644
11439--- a/arch/x86/kernel/acpi/realmode/wakeup.S
11440+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
15a11c5b
MT
11441@@ -108,6 +108,9 @@ wakeup_code:
11442 /* Do any other stuff... */
11443
11444 #ifndef CONFIG_64BIT
11445+	/* Recheck NX bit overrides (64bit path does this in trampoline) */

11446+ call verify_cpu
11447+
11448 /* This could also be done in C code... */
11449 movl pmode_cr3, %eax
11450 movl %eax, %cr3
11451@@ -131,6 +134,7 @@ wakeup_code:
11452 movl pmode_cr0, %eax
11453 movl %eax, %cr0
11454 jmp pmode_return
11455+# include "../../verify_cpu.S"
11456 #else
11457 pushw $0
11458 pushw trampoline_segment
fe2de317
MT
11459diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11460index 103b6ab..2004d0a 100644
11461--- a/arch/x86/kernel/acpi/sleep.c
11462+++ b/arch/x86/kernel/acpi/sleep.c
15a11c5b 11463@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
66a7e928 11464 header->trampoline_segment = trampoline_address() >> 4;
58c5fc13 11465 #ifdef CONFIG_SMP
16454cff 11466 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
58c5fc13 11467+
ae4e228f 11468+ pax_open_kernel();
58c5fc13
MT
11469 early_gdt_descr.address =
11470 (unsigned long)get_cpu_gdt_table(smp_processor_id());
ae4e228f 11471+ pax_close_kernel();
58c5fc13
MT
11472+
11473 initial_gs = per_cpu_offset(smp_processor_id());
11474 #endif
11475 initial_code = (unsigned long)wakeup_long64;
fe2de317
MT
11476diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11477index 13ab720..95d5442 100644
11478--- a/arch/x86/kernel/acpi/wakeup_32.S
11479+++ b/arch/x86/kernel/acpi/wakeup_32.S
58c5fc13
MT
11480@@ -30,13 +30,11 @@ wakeup_pmode_return:
11481 # and restore the stack ... but you need gdt for this to work
11482 movl saved_context_esp, %esp
11483
11484- movl %cs:saved_magic, %eax
11485- cmpl $0x12345678, %eax
11486+ cmpl $0x12345678, saved_magic
11487 jne bogus_magic
11488
11489 # jump to place where we left off
11490- movl saved_eip, %eax
11491- jmp *%eax
11492+ jmp *(saved_eip)
11493
11494 bogus_magic:
11495 jmp bogus_magic
fe2de317
MT
11496diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11497index c638228..16dfa8d 100644
11498--- a/arch/x86/kernel/alternative.c
11499+++ b/arch/x86/kernel/alternative.c
11500@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
6e9df6a3
MT
11501 */
11502 for (a = start; a < end; a++) {
11503 instr = (u8 *)&a->instr_offset + a->instr_offset;
11504+
11505+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11506+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11507+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11508+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11509+#endif
11510+
11511 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11512 BUG_ON(a->replacementlen > a->instrlen);
11513 BUG_ON(a->instrlen > sizeof(insnbuf));
fe2de317 11514@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
6e9df6a3
MT
11515 for (poff = start; poff < end; poff++) {
11516 u8 *ptr = (u8 *)poff + *poff;
11517
11518+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11519+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11520+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11521+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11522+#endif
11523+
57199397
MT
11524 if (!*poff || ptr < text || ptr >= text_end)
11525 continue;
11526 /* turn DS segment override prefix into lock prefix */
11527- if (*ptr == 0x3e)
11528+ if (*ktla_ktva(ptr) == 0x3e)
11529 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11530 };
11531 mutex_unlock(&text_mutex);
fe2de317 11532@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
6e9df6a3
MT
11533 for (poff = start; poff < end; poff++) {
11534 u8 *ptr = (u8 *)poff + *poff;
11535
11536+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11537+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11538+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11539+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11540+#endif
11541+
57199397
MT
11542 if (!*poff || ptr < text || ptr >= text_end)
11543 continue;
11544 /* turn lock prefix into DS segment override prefix */
11545- if (*ptr == 0xf0)
11546+ if (*ktla_ktva(ptr) == 0xf0)
11547 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11548 };
11549 mutex_unlock(&text_mutex);
fe2de317 11550@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
58c5fc13
MT
11551
11552 BUG_ON(p->len > MAX_PATCH_LEN);
11553 /* prep the buffer with the original instructions */
11554- memcpy(insnbuf, p->instr, p->len);
11555+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11556 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11557 (unsigned long)p->instr, p->len);
11558
fe2de317 11559@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
df50ba0c
MT
11560 if (smp_alt_once)
11561 free_init_pages("SMP alternatives",
11562 (unsigned long)__smp_locks,
11563- (unsigned long)__smp_locks_end);
11564+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11565
11566 restart_nmi();
11567 }
fe2de317 11568@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
58c5fc13
MT
11569 * instructions. And on the local CPU you need to be protected again NMI or MCE
11570 * handlers seeing an inconsistent instruction while you patch.
11571 */
bc901d79
MT
11572-void *__init_or_module text_poke_early(void *addr, const void *opcode,
11573+void *__kprobes text_poke_early(void *addr, const void *opcode,
ae4e228f 11574 size_t len)
58c5fc13
MT
11575 {
11576 unsigned long flags;
58c5fc13
MT
11577 local_irq_save(flags);
11578- memcpy(addr, opcode, len);
11579+
ae4e228f 11580+ pax_open_kernel();
58c5fc13 11581+ memcpy(ktla_ktva(addr), opcode, len);
57199397 11582 sync_core();
ae4e228f 11583+ pax_close_kernel();
58c5fc13 11584+
ae4e228f 11585 local_irq_restore(flags);
58c5fc13 11586 /* Could also do a CLFLUSH here to speed up CPU recovery; but
57199397 11587 that causes hangs on some VIA CPUs. */
fe2de317 11588@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
58c5fc13
MT
11589 */
11590 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11591 {
11592- unsigned long flags;
11593- char *vaddr;
11594+ unsigned char *vaddr = ktla_ktva(addr);
11595 struct page *pages[2];
11596- int i;
11597+ size_t i;
58c5fc13 11598
ae4e228f 11599 if (!core_kernel_text((unsigned long)addr)) {
58c5fc13
MT
11600- pages[0] = vmalloc_to_page(addr);
11601- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
58c5fc13
MT
11602+ pages[0] = vmalloc_to_page(vaddr);
11603+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11604 } else {
11605- pages[0] = virt_to_page(addr);
11606+ pages[0] = virt_to_page(vaddr);
11607 WARN_ON(!PageReserved(pages[0]));
11608- pages[1] = virt_to_page(addr + PAGE_SIZE);
11609+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11610 }
11611 BUG_ON(!pages[0]);
11612- local_irq_save(flags);
11613- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11614- if (pages[1])
11615- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11616- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11617- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11618- clear_fixmap(FIX_TEXT_POKE0);
11619- if (pages[1])
11620- clear_fixmap(FIX_TEXT_POKE1);
11621- local_flush_tlb();
11622- sync_core();
11623- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11624- that causes hangs on some VIA CPUs. */
11625+ text_poke_early(addr, opcode, len);
11626 for (i = 0; i < len; i++)
11627- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11628- local_irq_restore(flags);
bc901d79 11629+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
58c5fc13
MT
11630 return addr;
11631 }
df50ba0c 11632
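Note on the alternative.c hunks: text_poke() is reduced to a wrapper around text_poke_early(), and every patching write goes through ktla_ktva() inside a pax_open_kernel()/pax_close_kernel() pair, which under KERNEXEC temporarily allows stores to otherwise read-only kernel memory via the writable alias of the kernel text. A rough sketch of that idiom, with patch_byte() as a hypothetical example (not a function from the patch):

	static void patch_byte(void *addr, u8 val)
	{
		pax_open_kernel();		/* temporarily lift kernel write protection */
		*(u8 *)ktla_ktva(addr) = val;	/* store through the writable alias of kernel text */
		pax_close_kernel();		/* restore write protection */
	}

The same open/close bracketing is used throughout the patch wherever read-only kernel data such as GDT entries or constified ops structures has to be modified (see the acpi/sleep.c hunk above and the apm_32.c hunks below).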
fe2de317
MT
11633diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11634index 52fa563..5de9d9c 100644
11635--- a/arch/x86/kernel/apic/apic.c
11636+++ b/arch/x86/kernel/apic/apic.c
6e9df6a3 11637@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
66a7e928 11638 /*
15a11c5b
MT
11639 * Debug level, exported for io_apic.c
11640 */
11641-unsigned int apic_verbosity;
11642+int apic_verbosity;
ae4e228f 11643
15a11c5b 11644 int pic_mode;
66a7e928 11645
fe2de317 11646@@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs *regs)
8308f9c9
MT
11647 apic_write(APIC_ESR, 0);
11648 v1 = apic_read(APIC_ESR);
11649 ack_APIC_irq();
11650- atomic_inc(&irq_err_count);
11651+ atomic_inc_unchecked(&irq_err_count);
11652
15a11c5b
MT
11653 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11654 smp_processor_id(), v0 , v1);
fe2de317 11655@@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(void)
66a7e928
MT
11656 u16 *bios_cpu_apicid;
11657 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11658
11659+ pax_track_stack();
11660+
11661 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11662 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11663
fe2de317
MT
11664diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11665index 8eb863e..32e6934 100644
11666--- a/arch/x86/kernel/apic/io_apic.c
11667+++ b/arch/x86/kernel/apic/io_apic.c
11668@@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
ae4e228f
MT
11669 }
11670 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11671
11672-void lock_vector_lock(void)
11673+void lock_vector_lock(void) __acquires(vector_lock)
11674 {
11675 /* Used to the online set of cpus does not change
11676 * during assign_irq_vector.
15a11c5b 11677@@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
df50ba0c 11678 raw_spin_lock(&vector_lock);
ae4e228f
MT
11679 }
11680
11681-void unlock_vector_lock(void)
11682+void unlock_vector_lock(void) __releases(vector_lock)
11683 {
df50ba0c 11684 raw_spin_unlock(&vector_lock);
ae4e228f 11685 }
fe2de317 11686@@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_data *data)
8308f9c9
MT
11687 ack_APIC_irq();
11688 }
11689
11690-atomic_t irq_mis_count;
11691+atomic_unchecked_t irq_mis_count;
11692
11693 /*
11694 * IO-APIC versions below 0x20 don't support EOI register.
fe2de317 11695@@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_data *data)
8308f9c9
MT
11696 * at the cpu.
11697 */
11698 if (!(v & (1 << (i & 0x1f)))) {
11699- atomic_inc(&irq_mis_count);
11700+ atomic_inc_unchecked(&irq_mis_count);
11701
11702 eoi_ioapic_irq(irq, cfg);
11703 }
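Note on the apic hunks: irq_err_count and irq_mis_count become atomic_unchecked_t and are bumped with atomic_inc_unchecked(). Under the PaX REFCOUNT feature, plain atomic_t operations are instrumented to catch overflows; counters that are pure statistics and may legitimately wrap use the *_unchecked variants to stay outside that detection. The same substitution appears below for mce_paniced and mce_fake_paniced in mce.c. A minimal sketch (note_irq_error() is hypothetical):

	static atomic_unchecked_t irq_err_count;	/* statistic only, wrap-around is harmless */

	static void note_irq_error(void)
	{
		atomic_inc_unchecked(&irq_err_count);	/* not overflow-checked under PAX_REFCOUNT */
	}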
fe2de317
MT
11704diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11705index 0371c48..54cdf63 100644
11706--- a/arch/x86/kernel/apm_32.c
11707+++ b/arch/x86/kernel/apm_32.c
15a11c5b 11708@@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
ae4e228f
MT
11709 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11710 * even though they are called in protected mode.
11711 */
11712-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11713+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11714 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11715
11716 static const char driver_version[] = "1.16ac"; /* no spaces */
15a11c5b 11717@@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
58c5fc13
MT
11718 BUG_ON(cpu != 0);
11719 gdt = get_cpu_gdt_table(cpu);
11720 save_desc_40 = gdt[0x40 / 8];
11721+
ae4e228f 11722+ pax_open_kernel();
58c5fc13 11723 gdt[0x40 / 8] = bad_bios_desc;
ae4e228f 11724+ pax_close_kernel();
58c5fc13 11725
58c5fc13
MT
11726 apm_irq_save(flags);
11727 APM_DO_SAVE_SEGS;
15a11c5b 11728@@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
58c5fc13
MT
11729 &call->esi);
11730 APM_DO_RESTORE_SEGS;
11731 apm_irq_restore(flags);
11732+
ae4e228f 11733+ pax_open_kernel();
58c5fc13 11734 gdt[0x40 / 8] = save_desc_40;
ae4e228f 11735+ pax_close_kernel();
58c5fc13
MT
11736+
11737 put_cpu();
11738
11739 return call->eax & 0xff;
fe2de317 11740@@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void *_call)
58c5fc13
MT
11741 BUG_ON(cpu != 0);
11742 gdt = get_cpu_gdt_table(cpu);
11743 save_desc_40 = gdt[0x40 / 8];
11744+
ae4e228f 11745+ pax_open_kernel();
58c5fc13 11746 gdt[0x40 / 8] = bad_bios_desc;
ae4e228f 11747+ pax_close_kernel();
58c5fc13 11748
58c5fc13
MT
11749 apm_irq_save(flags);
11750 APM_DO_SAVE_SEGS;
fe2de317 11751@@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void *_call)
58c5fc13
MT
11752 &call->eax);
11753 APM_DO_RESTORE_SEGS;
11754 apm_irq_restore(flags);
11755+
ae4e228f 11756+ pax_open_kernel();
58c5fc13 11757 gdt[0x40 / 8] = save_desc_40;
ae4e228f 11758+ pax_close_kernel();
58c5fc13
MT
11759+
11760 put_cpu();
11761 return error;
11762 }
15a11c5b 11763@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
58c5fc13
MT
11764 * code to that CPU.
11765 */
11766 gdt = get_cpu_gdt_table(0);
11767+
ae4e228f
MT
11768+ pax_open_kernel();
11769 set_desc_base(&gdt[APM_CS >> 3],
11770 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11771 set_desc_base(&gdt[APM_CS_16 >> 3],
11772 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11773 set_desc_base(&gdt[APM_DS >> 3],
11774 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11775+ pax_close_kernel();
58c5fc13 11776
58c5fc13
MT
11777 proc_create("apm", 0, NULL, &apm_file_ops);
11778
fe2de317
MT
11779diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11780index 4f13faf..87db5d2 100644
11781--- a/arch/x86/kernel/asm-offsets.c
11782+++ b/arch/x86/kernel/asm-offsets.c
66a7e928 11783@@ -33,6 +33,8 @@ void common(void) {
71d190be 11784 OFFSET(TI_status, thread_info, status);
66a7e928
MT
11785 OFFSET(TI_addr_limit, thread_info, addr_limit);
11786 OFFSET(TI_preempt_count, thread_info, preempt_count);
11787+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11788+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
58c5fc13 11789
66a7e928
MT
11790 BLANK();
11791 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
15a11c5b 11792@@ -53,8 +55,26 @@ void common(void) {
ae4e228f 11793 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
66a7e928 11794 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
ae4e228f
MT
11795 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11796+
11797+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f 11798+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
fe2de317
MT
11799 #endif
11800
df50ba0c
MT
11801+#ifdef CONFIG_PAX_MEMORY_UDEREF
11802+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11803+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
15a11c5b
MT
11804+#ifdef CONFIG_X86_64
11805+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11806+#endif
fe2de317
MT
11807+#endif
11808+
66a7e928
MT
11809+#endif
11810+
11811+ BLANK();
11812+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11813+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
8308f9c9 11814+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
66a7e928 11815+
8308f9c9
MT
11816 #ifdef CONFIG_XEN
11817 BLANK();
11818 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
fe2de317
MT
11819diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11820index e72a119..6e2955d 100644
11821--- a/arch/x86/kernel/asm-offsets_64.c
11822+++ b/arch/x86/kernel/asm-offsets_64.c
11823@@ -69,6 +69,7 @@ int main(void)
11824 BLANK();
11825 #undef ENTRY
11826
11827+ DEFINE(TSS_size, sizeof(struct tss_struct));
11828 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11829 BLANK();
11830
11831diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11832index 6042981..e638266 100644
11833--- a/arch/x86/kernel/cpu/Makefile
11834+++ b/arch/x86/kernel/cpu/Makefile
11835@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11836 CFLAGS_REMOVE_perf_event.o = -pg
11837 endif
11838
11839-# Make sure load_percpu_segment has no stackprotector
11840-nostackp := $(call cc-option, -fno-stack-protector)
11841-CFLAGS_common.o := $(nostackp)
11842-
11843 obj-y := intel_cacheinfo.o scattered.o topology.o
11844 obj-y += proc.o capflags.o powerflags.o common.o
11845 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11846diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11847index b13ed39..603286c 100644
11848--- a/arch/x86/kernel/cpu/amd.c
11849+++ b/arch/x86/kernel/cpu/amd.c
11850@@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
71d190be
MT
11851 unsigned int size)
11852 {
11853 /* AMD errata T13 (order #21922) */
11854- if ((c->x86 == 6)) {
11855+ if (c->x86 == 6) {
11856 /* Duron Rev A0 */
11857 if (c->x86_model == 3 && c->x86_mask == 0)
11858 size = 64;
fe2de317
MT
11859diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11860index 6218439..0f1addc 100644
11861--- a/arch/x86/kernel/cpu/common.c
11862+++ b/arch/x86/kernel/cpu/common.c
11863@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
58c5fc13
MT
11864
11865 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11866
11867-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11868-#ifdef CONFIG_X86_64
11869- /*
11870- * We need valid kernel segments for data and code in long mode too
11871- * IRET will check the segment types kkeil 2000/10/28
11872- * Also sysret mandates a special GDT layout
11873- *
11874- * TLS descriptors are currently at a different place compared to i386.
11875- * Hopefully nobody expects them at a fixed place (Wine?)
11876- */
ae4e228f
MT
11877- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11878- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11879- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11880- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11881- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11882- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
58c5fc13 11883-#else
ae4e228f
MT
11884- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11885- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11886- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11887- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
58c5fc13
MT
11888- /*
11889- * Segments used for calling PnP BIOS have byte granularity.
11890- * They code segments and data segments have fixed 64k limits,
11891- * the transfer segment sizes are set at run time.
11892- */
11893- /* 32-bit code */
ae4e228f 11894- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
58c5fc13 11895- /* 16-bit code */
ae4e228f 11896- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
58c5fc13 11897- /* 16-bit data */
ae4e228f 11898- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
58c5fc13 11899- /* 16-bit data */
ae4e228f 11900- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
58c5fc13 11901- /* 16-bit data */
ae4e228f 11902- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
58c5fc13
MT
11903- /*
11904- * The APM segments have byte granularity and their bases
11905- * are set at run time. All have 64k limits.
11906- */
11907- /* 32-bit code */
ae4e228f 11908- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
58c5fc13 11909- /* 16-bit code */
ae4e228f 11910- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
58c5fc13 11911- /* data */
ae4e228f 11912- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
58c5fc13 11913-
ae4e228f
MT
11914- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11915- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
58c5fc13
MT
11916- GDT_STACK_CANARY_INIT
11917-#endif
11918-} };
11919-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11920-
11921 static int __init x86_xsave_setup(char *s)
11922 {
11923 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
15a11c5b 11924@@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
58c5fc13
MT
11925 {
11926 struct desc_ptr gdt_descr;
11927
11928- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11929+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11930 gdt_descr.size = GDT_SIZE - 1;
11931 load_gdt(&gdt_descr);
11932 /* Reload the per-cpu base */
fe2de317 11933@@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
58c5fc13
MT
11934 /* Filter out anything that depends on CPUID levels we don't have */
11935 filter_cpuid_features(c, true);
11936
ae4e228f 11937+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
58c5fc13
MT
11938+ setup_clear_cpu_cap(X86_FEATURE_SEP);
11939+#endif
11940+
11941 /* If the model name is still unset, do table lookup. */
11942 if (!c->x86_model_id[0]) {
11943 const char *p;
fe2de317 11944@@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(char *arg)
71d190be
MT
11945 }
11946 __setup("clearcpuid=", setup_disablecpuid);
11947
11948+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11949+EXPORT_PER_CPU_SYMBOL(current_tinfo);
11950+
11951 #ifdef CONFIG_X86_64
11952 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11953
fe2de317 11954@@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
71d190be
MT
11955 EXPORT_PER_CPU_SYMBOL(current_task);
11956
11957 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11958- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
66a7e928 11959+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
71d190be
MT
11960 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11961
11962 DEFINE_PER_CPU(char *, irq_stack_ptr) =
fe2de317 11963@@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
bc901d79
MT
11964 {
11965 memset(regs, 0, sizeof(struct pt_regs));
11966 regs->fs = __KERNEL_PERCPU;
11967- regs->gs = __KERNEL_STACK_CANARY;
11968+ savesegment(gs, regs->gs);
11969
11970 return regs;
11971 }
15a11c5b 11972@@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
58c5fc13
MT
11973 int i;
11974
11975 cpu = stack_smp_processor_id();
11976- t = &per_cpu(init_tss, cpu);
11977+ t = init_tss + cpu;
ae4e228f 11978 oist = &per_cpu(orig_ist, cpu);
58c5fc13
MT
11979
11980 #ifdef CONFIG_NUMA
15a11c5b 11981@@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
df50ba0c
MT
11982 switch_to_new_gdt(cpu);
11983 loadsegment(fs, 0);
11984
11985- load_idt((const struct desc_ptr *)&idt_descr);
11986+ load_idt(&idt_descr);
11987
11988 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11989 syscall_init();
15a11c5b 11990@@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
bc901d79
MT
11991 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11992 barrier();
11993
11994- x86_configure_nx();
11995 if (cpu != 0)
11996 enable_x2apic();
11997
15a11c5b 11998@@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
58c5fc13
MT
11999 {
12000 int cpu = smp_processor_id();
12001 struct task_struct *curr = current;
12002- struct tss_struct *t = &per_cpu(init_tss, cpu);
12003+ struct tss_struct *t = init_tss + cpu;
12004 struct thread_struct *thread = &curr->thread;
12005
12006 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
fe2de317
MT
12007diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12008index ed6086e..a1dcf29 100644
12009--- a/arch/x86/kernel/cpu/intel.c
12010+++ b/arch/x86/kernel/cpu/intel.c
12011@@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug(void)
58c5fc13
MT
12012 * Update the IDT descriptor and reload the IDT so that
12013 * it uses the read-only mapped virtual address.
12014 */
12015- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12016+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12017 load_idt(&idt_descr);
12018 }
12019 #endif
fe2de317
MT
12020diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
12021index 0ed633c..82cef2a 100644
12022--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
12023+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
12024@@ -215,7 +215,9 @@ static int inject_init(void)
12025 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
12026 return -ENOMEM;
12027 printk(KERN_INFO "Machine check injector initialized\n");
12028- mce_chrdev_ops.write = mce_write;
12029+ pax_open_kernel();
12030+ *(void **)&mce_chrdev_ops.write = mce_write;
12031+ pax_close_kernel();
12032 register_die_notifier(&mce_raise_nb);
12033 return 0;
12034 }
12035diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12036index 08363b0..ee26113 100644
12037--- a/arch/x86/kernel/cpu/mcheck/mce.c
12038+++ b/arch/x86/kernel/cpu/mcheck/mce.c
6e9df6a3
MT
12039@@ -42,6 +42,7 @@
12040 #include <asm/processor.h>
c52201e0
MT
12041 #include <asm/mce.h>
12042 #include <asm/msr.h>
12043+#include <asm/local.h>
12044
12045 #include "mce-internal.h"
12046
6e9df6a3 12047@@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
ae4e228f
MT
12048 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12049 m->cs, m->ip);
12050
12051- if (m->cs == __KERNEL_CS)
12052+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12053 print_symbol("{%s}", m->ip);
12054 pr_cont("\n");
12055 }
6e9df6a3 12056@@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
8308f9c9
MT
12057
12058 #define PANIC_TIMEOUT 5 /* 5 seconds */
12059
12060-static atomic_t mce_paniced;
12061+static atomic_unchecked_t mce_paniced;
12062
12063 static int fake_panic;
12064-static atomic_t mce_fake_paniced;
12065+static atomic_unchecked_t mce_fake_paniced;
12066
12067 /* Panic in progress. Enable interrupts and wait for final IPI */
12068 static void wait_for_panic(void)
fe2de317 12069@@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
8308f9c9
MT
12070 /*
12071 * Make sure only one CPU runs in machine check panic
12072 */
12073- if (atomic_inc_return(&mce_paniced) > 1)
12074+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12075 wait_for_panic();
12076 barrier();
12077
fe2de317 12078@@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
8308f9c9
MT
12079 console_verbose();
12080 } else {
12081 /* Don't log too much for fake panic */
12082- if (atomic_inc_return(&mce_fake_paniced) > 1)
12083+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12084 return;
12085 }
12086 /* First print corrected ones that are still unlogged */
6e9df6a3 12087@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
8308f9c9
MT
12088 * might have been modified by someone else.
12089 */
12090 rmb();
12091- if (atomic_read(&mce_paniced))
12092+ if (atomic_read_unchecked(&mce_paniced))
12093 wait_for_panic();
12094 if (!monarch_timeout)
12095 goto out;
fe2de317
MT
12096@@ -1392,7 +1393,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12097 }
12098
12099 /* Call the installed machine check handler for this CPU setup. */
12100-void (*machine_check_vector)(struct pt_regs *, long error_code) =
12101+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12102 unexpected_machine_check;
12103
12104 /*
12105@@ -1415,7 +1416,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12106 return;
12107 }
12108
12109+ pax_open_kernel();
12110 machine_check_vector = do_machine_check;
12111+ pax_close_kernel();
12112
12113 __mcheck_cpu_init_generic();
12114 __mcheck_cpu_init_vendor(c);
12115@@ -1429,7 +1432,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
58c5fc13
MT
12116 */
12117
6e9df6a3
MT
12118 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12119-static int mce_chrdev_open_count; /* #times opened */
12120+static local_t mce_chrdev_open_count; /* #times opened */
12121 static int mce_chrdev_open_exclu; /* already open exclusive? */
58c5fc13 12122
6e9df6a3 12123 static int mce_chrdev_open(struct inode *inode, struct file *file)
fe2de317 12124@@ -1437,7 +1440,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
6e9df6a3 12125 spin_lock(&mce_chrdev_state_lock);
58c5fc13 12126
6e9df6a3
MT
12127 if (mce_chrdev_open_exclu ||
12128- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12129+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12130 spin_unlock(&mce_chrdev_state_lock);
58c5fc13
MT
12131
12132 return -EBUSY;
fe2de317 12133@@ -1445,7 +1448,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
58c5fc13
MT
12134
12135 if (file->f_flags & O_EXCL)
6e9df6a3
MT
12136 mce_chrdev_open_exclu = 1;
12137- mce_chrdev_open_count++;
12138+ local_inc(&mce_chrdev_open_count);
58c5fc13 12139
6e9df6a3 12140 spin_unlock(&mce_chrdev_state_lock);
58c5fc13 12141
fe2de317 12142@@ -1456,7 +1459,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
58c5fc13 12143 {
6e9df6a3 12144 spin_lock(&mce_chrdev_state_lock);
58c5fc13 12145
6e9df6a3
MT
12146- mce_chrdev_open_count--;
12147+ local_dec(&mce_chrdev_open_count);
12148 mce_chrdev_open_exclu = 0;
58c5fc13 12149
6e9df6a3 12150 spin_unlock(&mce_chrdev_state_lock);
fe2de317 12151@@ -2147,7 +2150,7 @@ struct dentry *mce_get_debugfs_dir(void)
8308f9c9
MT
12152 static void mce_reset(void)
12153 {
12154 cpu_missing = 0;
12155- atomic_set(&mce_fake_paniced, 0);
12156+ atomic_set_unchecked(&mce_fake_paniced, 0);
12157 atomic_set(&mce_executing, 0);
12158 atomic_set(&mce_callin, 0);
12159 atomic_set(&global_nwo, 0);
fe2de317
MT
12160diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12161index 5c0e653..1e82c7c 100644
12162--- a/arch/x86/kernel/cpu/mcheck/p5.c
12163+++ b/arch/x86/kernel/cpu/mcheck/p5.c
12164@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12165 if (!cpu_has(c, X86_FEATURE_MCE))
12166 return;
12167
15a11c5b 12168+ pax_open_kernel();
fe2de317 12169 machine_check_vector = pentium_machine_check;
15a11c5b 12170+ pax_close_kernel();
fe2de317
MT
12171 /* Make sure the vector pointer is visible before we enable MCEs: */
12172 wmb();
12173
12174diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12175index 54060f5..e6ba93d 100644
12176--- a/arch/x86/kernel/cpu/mcheck/winchip.c
12177+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12178@@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12179 {
12180 u32 lo, hi;
12181
12182+ pax_open_kernel();
12183 machine_check_vector = winchip_machine_check;
12184+ pax_close_kernel();
12185 /* Make sure the vector pointer is visible before we enable MCEs: */
12186 wmb();
12187
12188diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12189index 6b96110..0da73eb 100644
12190--- a/arch/x86/kernel/cpu/mtrr/main.c
12191+++ b/arch/x86/kernel/cpu/mtrr/main.c
66a7e928 12192@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
ae4e228f
MT
12193 u64 size_or_mask, size_and_mask;
12194 static bool mtrr_aps_delayed_init;
12195
df50ba0c 12196-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
ae4e228f
MT
12197+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12198
df50ba0c 12199 const struct mtrr_ops *mtrr_if;
ae4e228f 12200
fe2de317
MT
12201diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12202index df5e41f..816c719 100644
12203--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12204+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15a11c5b
MT
12205@@ -25,7 +25,7 @@ struct mtrr_ops {
12206 int (*validate_add_page)(unsigned long base, unsigned long size,
ae4e228f 12207 unsigned int type);
15a11c5b
MT
12208 int (*have_wrcomb)(void);
12209-};
12210+} __do_const;
ae4e228f
MT
12211
12212 extern int generic_get_free_region(unsigned long base, unsigned long size,
15a11c5b 12213 int replace_reg);
fe2de317
MT
12214diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12215index cfa62ec..9250dd7 100644
12216--- a/arch/x86/kernel/cpu/perf_event.c
12217+++ b/arch/x86/kernel/cpu/perf_event.c
12218@@ -795,6 +795,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
66a7e928
MT
12219 int i, j, w, wmax, num = 0;
12220 struct hw_perf_event *hwc;
12221
12222+ pax_track_stack();
12223+
12224 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
12225
12226 for (i = 0; i < n; i++) {
fe2de317 12227@@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
57199397
MT
12228 break;
12229
bc901d79 12230 perf_callchain_store(entry, frame.return_address);
57199397 12231- fp = frame.next_frame;
6e9df6a3 12232+ fp = (const void __force_user *)frame.next_frame;
57199397
MT
12233 }
12234 }
12235
fe2de317
MT
12236diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12237index 764c7c2..c5d9c7b 100644
12238--- a/arch/x86/kernel/crash.c
12239+++ b/arch/x86/kernel/crash.c
12240@@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
58c5fc13
MT
12241 regs = args->regs;
12242
12243 #ifdef CONFIG_X86_32
12244- if (!user_mode_vm(regs)) {
12245+ if (!user_mode(regs)) {
12246 crash_fixup_ss_esp(&fixed_regs, regs);
12247 regs = &fixed_regs;
12248 }
fe2de317
MT
12249diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12250index 37250fe..bf2ec74 100644
12251--- a/arch/x86/kernel/doublefault_32.c
12252+++ b/arch/x86/kernel/doublefault_32.c
58c5fc13
MT
12253@@ -11,7 +11,7 @@
12254
12255 #define DOUBLEFAULT_STACKSIZE (1024)
12256 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12257-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12258+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12259
12260 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12261
12262@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12263 unsigned long gdt, tss;
12264
12265 store_gdt(&gdt_desc);
12266- gdt = gdt_desc.address;
12267+ gdt = (unsigned long)gdt_desc.address;
12268
12269 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12270
fe2de317 12271@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
58c5fc13
MT
12272 /* 0x2 bit is always set */
12273 .flags = X86_EFLAGS_SF | 0x2,
12274 .sp = STACK_START,
12275- .es = __USER_DS,
12276+ .es = __KERNEL_DS,
12277 .cs = __KERNEL_CS,
12278 .ss = __KERNEL_DS,
12279- .ds = __USER_DS,
12280+ .ds = __KERNEL_DS,
12281 .fs = __KERNEL_PERCPU,
12282
12283 .__cr3 = __pa_nodebug(swapper_pg_dir),
fe2de317
MT
12284diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12285index 1aae78f..aab3a3d 100644
12286--- a/arch/x86/kernel/dumpstack.c
12287+++ b/arch/x86/kernel/dumpstack.c
bc901d79
MT
12288@@ -2,6 +2,9 @@
12289 * Copyright (C) 1991, 1992 Linus Torvalds
12290 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12291 */
12292+#ifdef CONFIG_GRKERNSEC_HIDESYM
12293+#define __INCLUDED_BY_HIDESYM 1
12294+#endif
12295 #include <linux/kallsyms.h>
12296 #include <linux/kprobes.h>
12297 #include <linux/uaccess.h>
fe2de317 12298@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
71d190be
MT
12299 static void
12300 print_ftrace_graph_addr(unsigned long addr, void *data,
12301 const struct stacktrace_ops *ops,
12302- struct thread_info *tinfo, int *graph)
12303+ struct task_struct *task, int *graph)
12304 {
12305- struct task_struct *task = tinfo->task;
12306 unsigned long ret_addr;
12307 int index = task->curr_ret_stack;
12308
fe2de317 12309@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
71d190be
MT
12310 static inline void
12311 print_ftrace_graph_addr(unsigned long addr, void *data,
12312 const struct stacktrace_ops *ops,
12313- struct thread_info *tinfo, int *graph)
12314+ struct task_struct *task, int *graph)
12315 { }
12316 #endif
12317
fe2de317 12318@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
71d190be
MT
12319 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12320 */
12321
12322-static inline int valid_stack_ptr(struct thread_info *tinfo,
12323- void *p, unsigned int size, void *end)
12324+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12325 {
12326- void *t = tinfo;
12327 if (end) {
12328 if (p < end && p >= (end-THREAD_SIZE))
12329 return 1;
fe2de317 12330@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
71d190be
MT
12331 }
12332
12333 unsigned long
12334-print_context_stack(struct thread_info *tinfo,
12335+print_context_stack(struct task_struct *task, void *stack_start,
12336 unsigned long *stack, unsigned long bp,
12337 const struct stacktrace_ops *ops, void *data,
12338 unsigned long *end, int *graph)
12339 {
12340 struct stack_frame *frame = (struct stack_frame *)bp;
12341
12342- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12343+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12344 unsigned long addr;
12345
12346 addr = *stack;
fe2de317 12347@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
71d190be
MT
12348 } else {
12349 ops->address(data, addr, 0);
12350 }
12351- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12352+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12353 }
12354 stack++;
12355 }
fe2de317 12356@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
71d190be
MT
12357 EXPORT_SYMBOL_GPL(print_context_stack);
12358
12359 unsigned long
12360-print_context_stack_bp(struct thread_info *tinfo,
12361+print_context_stack_bp(struct task_struct *task, void *stack_start,
12362 unsigned long *stack, unsigned long bp,
12363 const struct stacktrace_ops *ops, void *data,
12364 unsigned long *end, int *graph)
fe2de317 12365@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
71d190be
MT
12366 struct stack_frame *frame = (struct stack_frame *)bp;
12367 unsigned long *ret_addr = &frame->return_address;
12368
12369- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12370+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12371 unsigned long addr = *ret_addr;
12372
12373 if (!__kernel_text_address(addr))
fe2de317 12374@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
71d190be
MT
12375 ops->address(data, addr, 1);
12376 frame = frame->next_frame;
12377 ret_addr = &frame->return_address;
12378- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12379+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12380 }
12381
12382 return (unsigned long)frame;
15a11c5b 12383@@ -186,7 +186,7 @@ void dump_stack(void)
57199397 12384
66a7e928 12385 bp = stack_frame(current, NULL);
57199397
MT
12386 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12387- current->pid, current->comm, print_tainted(),
12388+ task_pid_nr(current), current->comm, print_tainted(),
12389 init_utsname()->release,
12390 (int)strcspn(init_utsname()->version, " "),
12391 init_utsname()->version);
15a11c5b 12392@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
71d190be
MT
12393 }
12394 EXPORT_SYMBOL_GPL(oops_begin);
12395
12396+extern void gr_handle_kernel_exploit(void);
12397+
12398 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12399 {
12400 if (regs && kexec_should_crash(current))
fe2de317 12401@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
57199397
MT
12402 panic("Fatal exception in interrupt");
12403 if (panic_on_oops)
12404 panic("Fatal exception");
12405- do_exit(signr);
71d190be
MT
12406+
12407+ gr_handle_kernel_exploit();
12408+
57199397
MT
12409+ do_group_exit(signr);
12410 }
12411
12412 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
fe2de317 12413@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
57199397
MT
12414
12415 show_registers(regs);
12416 #ifdef CONFIG_X86_32
12417- if (user_mode_vm(regs)) {
12418+ if (user_mode(regs)) {
12419 sp = regs->sp;
12420 ss = regs->ss & 0xffff;
12421 } else {
fe2de317 12422@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
57199397
MT
12423 unsigned long flags = oops_begin();
12424 int sig = SIGSEGV;
12425
12426- if (!user_mode_vm(regs))
12427+ if (!user_mode(regs))
12428 report_bug(regs->ip, regs);
12429
12430 if (__die(str, regs, err))
fe2de317
MT
12431diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12432index 3b97a80..667ce7a 100644
12433--- a/arch/x86/kernel/dumpstack_32.c
12434+++ b/arch/x86/kernel/dumpstack_32.c
12435@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12436 bp = stack_frame(task, regs);
12437
12438 for (;;) {
12439- struct thread_info *context;
12440+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12441
12442- context = (struct thread_info *)
12443- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12444- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12445+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12446
12447- stack = (unsigned long *)context->previous_esp;
12448- if (!stack)
12449+ if (stack_start == task_stack_page(task))
12450 break;
12451+ stack = *(unsigned long **)stack_start;
12452 if (ops->stack(data, "IRQ") < 0)
12453 break;
12454 touch_nmi_watchdog();
12455@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12456 * When in-kernel, we also print out the stack and code at the
12457 * time of the fault..
12458 */
12459- if (!user_mode_vm(regs)) {
12460+ if (!user_mode(regs)) {
12461 unsigned int code_prologue = code_bytes * 43 / 64;
12462 unsigned int code_len = code_bytes;
12463 unsigned char c;
12464 u8 *ip;
12465+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12466
12467 printk(KERN_EMERG "Stack:\n");
12468 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12469
12470 printk(KERN_EMERG "Code: ");
12471
12472- ip = (u8 *)regs->ip - code_prologue;
12473+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12474 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12475 /* try starting at IP */
12476- ip = (u8 *)regs->ip;
12477+ ip = (u8 *)regs->ip + cs_base;
12478 code_len = code_len - code_prologue + 1;
12479 }
12480 for (i = 0; i < code_len; i++, ip++) {
12481@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12482 printk(" Bad EIP value.");
12483 break;
12484 }
12485- if (ip == (u8 *)regs->ip)
12486+ if (ip == (u8 *)regs->ip + cs_base)
12487 printk("<%02x> ", c);
12488 else
12489 printk("%02x ", c);
12490@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12491 {
12492 unsigned short ud2;
12493
12494+ ip = ktla_ktva(ip);
12495 if (ip < PAGE_OFFSET)
12496 return 0;
12497 if (probe_kernel_address((unsigned short *)ip, ud2))
12498@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12499
12500 return ud2 == 0x0b0f;
12501 }
12502+
12503+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12504+void pax_check_alloca(unsigned long size)
12505+{
12506+ unsigned long sp = (unsigned long)&sp, stack_left;
12507+
12508+ /* all kernel stacks are of the same size */
12509+ stack_left = sp & (THREAD_SIZE - 1);
12510+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12511+}
12512+EXPORT_SYMBOL(pax_check_alloca);
12513+#endif
12514diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12515index 19853ad..508ca79 100644
12516--- a/arch/x86/kernel/dumpstack_64.c
12517+++ b/arch/x86/kernel/dumpstack_64.c
12518@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12519 unsigned long *irq_stack_end =
12520 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12521 unsigned used = 0;
12522- struct thread_info *tinfo;
12523 int graph = 0;
12524 unsigned long dummy;
12525+ void *stack_start;
12526
12527 if (!task)
12528 task = current;
12529@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12530 * current stack address. If the stacks consist of nested
12531 * exceptions
12532 */
12533- tinfo = task_thread_info(task);
12534 for (;;) {
12535 char *id;
12536 unsigned long *estack_end;
12537+
12538 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12539 &used, &id);
12540
12541@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12542 if (ops->stack(data, id) < 0)
12543 break;
12544
12545- bp = ops->walk_stack(tinfo, stack, bp, ops,
12546+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12547 data, estack_end, &graph);
12548 ops->stack(data, "<EOE>");
12549 /*
12550@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12551 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12552 if (ops->stack(data, "IRQ") < 0)
12553 break;
12554- bp = ops->walk_stack(tinfo, stack, bp,
12555+ bp = ops->walk_stack(task, irq_stack, stack, bp,
12556 ops, data, irq_stack_end, &graph);
12557 /*
12558 * We link to the next stack (which would be
12559@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12560 /*
12561 * This handles the process stack:
12562 */
12563- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12564+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12565+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12566 put_cpu();
12567 }
12568 EXPORT_SYMBOL(dump_trace);
12569@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12570
12571 return ud2 == 0x0b0f;
12572 }
12573+
12574+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12575+void pax_check_alloca(unsigned long size)
12576+{
12577+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12578+ unsigned cpu, used;
12579+ char *id;
12580+
12581+ /* check the process stack first */
12582+ stack_start = (unsigned long)task_stack_page(current);
12583+ stack_end = stack_start + THREAD_SIZE;
12584+ if (likely(stack_start <= sp && sp < stack_end)) {
12585+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
12586+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12587+ return;
12588+ }
12589+
12590+ cpu = get_cpu();
12591+
12592+ /* check the irq stacks */
12593+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12594+ stack_start = stack_end - IRQ_STACK_SIZE;
12595+ if (stack_start <= sp && sp < stack_end) {
12596+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12597+ put_cpu();
12598+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12599+ return;
12600+ }
12601+
12602+ /* check the exception stacks */
12603+ used = 0;
12604+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12605+ stack_start = stack_end - EXCEPTION_STKSZ;
12606+ if (stack_end && stack_start <= sp && sp < stack_end) {
12607+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12608+ put_cpu();
12609+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12610+ return;
12611+ }
12612+
12613+ put_cpu();
12614+
12615+ /* unknown stack */
12616+ BUG();
12617+}
12618+EXPORT_SYMBOL(pax_check_alloca);
12619+#endif
12620diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12621index cd28a35..2601699 100644
12622--- a/arch/x86/kernel/early_printk.c
12623+++ b/arch/x86/kernel/early_printk.c
12624@@ -7,6 +7,7 @@
12625 #include <linux/pci_regs.h>
12626 #include <linux/pci_ids.h>
12627 #include <linux/errno.h>
12628+#include <linux/sched.h>
12629 #include <asm/io.h>
12630 #include <asm/processor.h>
12631 #include <asm/fcntl.h>
12632@@ -179,6 +180,8 @@ asmlinkage void early_printk(const char *fmt, ...)
12633 int n;
12634 va_list ap;
12635
12636+ pax_track_stack();
12637+
12638 va_start(ap, fmt);
12639 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12640 early_console->write(early_console, buf, n);
12641diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12642index f3f6f53..0841b66 100644
12643--- a/arch/x86/kernel/entry_32.S
12644+++ b/arch/x86/kernel/entry_32.S
12645@@ -186,13 +186,146 @@
12646 /*CFI_REL_OFFSET gs, PT_GS*/
12647 .endm
12648 .macro SET_KERNEL_GS reg
12649+
12650+#ifdef CONFIG_CC_STACKPROTECTOR
12651 movl $(__KERNEL_STACK_CANARY), \reg
12652+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12653+ movl $(__USER_DS), \reg
12654+#else
12655+ xorl \reg, \reg
12656+#endif
12657+
12658 movl \reg, %gs
12659 .endm
12660
12661 #endif /* CONFIG_X86_32_LAZY_GS */
12662
12663-.macro SAVE_ALL
12664+.macro pax_enter_kernel
12665+#ifdef CONFIG_PAX_KERNEXEC
12666+ call pax_enter_kernel
12667+#endif
12668+.endm
12669+
12670+.macro pax_exit_kernel
12671+#ifdef CONFIG_PAX_KERNEXEC
12672+ call pax_exit_kernel
12673+#endif
12674+.endm
12675+
12676+#ifdef CONFIG_PAX_KERNEXEC
12677+ENTRY(pax_enter_kernel)
12678+#ifdef CONFIG_PARAVIRT
12679+ pushl %eax
12680+ pushl %ecx
12681+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12682+ mov %eax, %esi
12683+#else
12684+ mov %cr0, %esi
12685+#endif
12686+ bts $16, %esi
12687+ jnc 1f
12688+ mov %cs, %esi
12689+ cmp $__KERNEL_CS, %esi
12690+ jz 3f
12691+ ljmp $__KERNEL_CS, $3f
12692+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12693+2:
12694+#ifdef CONFIG_PARAVIRT
12695+ mov %esi, %eax
12696+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12697+#else
12698+ mov %esi, %cr0
12699+#endif
12700+3:
12701+#ifdef CONFIG_PARAVIRT
12702+ popl %ecx
12703+ popl %eax
df50ba0c 12704+#endif
12705+ ret
12706+ENDPROC(pax_enter_kernel)
12707+
12708+ENTRY(pax_exit_kernel)
12709+#ifdef CONFIG_PARAVIRT
12710+ pushl %eax
12711+ pushl %ecx
12712+#endif
12713+ mov %cs, %esi
12714+ cmp $__KERNEXEC_KERNEL_CS, %esi
12715+ jnz 2f
12716+#ifdef CONFIG_PARAVIRT
12717+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12718+ mov %eax, %esi
12719+#else
12720+ mov %cr0, %esi
12721+#endif
12722+ btr $16, %esi
12723+ ljmp $__KERNEL_CS, $1f
12724+1:
12725+#ifdef CONFIG_PARAVIRT
12726+ mov %esi, %eax
12727+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12728+#else
12729+ mov %esi, %cr0
12730+#endif
12731+2:
12732+#ifdef CONFIG_PARAVIRT
12733+ popl %ecx
12734+ popl %eax
12735+#endif
12736+ ret
12737+ENDPROC(pax_exit_kernel)
12738+#endif
12739+
12740+.macro pax_erase_kstack
12741+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12742+ call pax_erase_kstack
12743+#endif
12744+.endm
12745+
12746+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12747+/*
12748+ * ebp: thread_info
12749+ * ecx, edx: can be clobbered
12750+ */
12751+ENTRY(pax_erase_kstack)
12752+ pushl %edi
12753+ pushl %eax
12754+
12755+ mov TI_lowest_stack(%ebp), %edi
12756+ mov $-0xBEEF, %eax
12757+ std
12758+
12759+1: mov %edi, %ecx
12760+ and $THREAD_SIZE_asm - 1, %ecx
12761+ shr $2, %ecx
12762+ repne scasl
12763+ jecxz 2f
12764+
12765+ cmp $2*16, %ecx
12766+ jc 2f
12767+
12768+ mov $2*16, %ecx
12769+ repe scasl
12770+ jecxz 2f
12771+ jne 1b
12772+
12773+2: cld
12774+ mov %esp, %ecx
12775+ sub %edi, %ecx
12776+ shr $2, %ecx
12777+ rep stosl
12778+
12779+ mov TI_task_thread_sp0(%ebp), %edi
12780+ sub $128, %edi
12781+ mov %edi, TI_lowest_stack(%ebp)
12782+
12783+ popl %eax
12784+ popl %edi
12785+ ret
12786+ENDPROC(pax_erase_kstack)
12787+#endif
12788+
12789+.macro __SAVE_ALL _DS
12790 cld
12791 PUSH_GS
12792 pushl_cfi %fs
12793@@ -215,7 +348,7 @@
12794 CFI_REL_OFFSET ecx, 0
12795 pushl_cfi %ebx
12796 CFI_REL_OFFSET ebx, 0
12797- movl $(__USER_DS), %edx
12798+ movl $\_DS, %edx
12799 movl %edx, %ds
12800 movl %edx, %es
12801 movl $(__KERNEL_PERCPU), %edx
12802@@ -223,6 +356,15 @@
12803 SET_KERNEL_GS %edx
12804 .endm
12805
12806+.macro SAVE_ALL
12807+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12808+ __SAVE_ALL __KERNEL_DS
12809+ pax_enter_kernel
12810+#else
12811+ __SAVE_ALL __USER_DS
12812+#endif
12813+.endm
12814+
12815 .macro RESTORE_INT_REGS
12816 popl_cfi %ebx
12817 CFI_RESTORE ebx
12818@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12819 popfl_cfi
12820 jmp syscall_exit
12821 CFI_ENDPROC
12822-END(ret_from_fork)
12823+ENDPROC(ret_from_fork)
12824
12825 /*
12826 * Interrupt exit functions should be protected against kprobes
12827@@ -333,7 +475,15 @@ check_userspace:
12828 movb PT_CS(%esp), %al
12829 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12830 cmpl $USER_RPL, %eax
12831+
12832+#ifdef CONFIG_PAX_KERNEXEC
12833+ jae resume_userspace
12834+
12835+ PAX_EXIT_KERNEL
12836+ jmp resume_kernel
12837+#else
12838 jb resume_kernel # not returning to v8086 or userspace
12839+#endif
12840
12841 ENTRY(resume_userspace)
12842 LOCKDEP_SYS_EXIT
12843@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12844 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12845 # int/exception return?
12846 jne work_pending
12847- jmp restore_all
12848-END(ret_from_exception)
12849+ jmp restore_all_pax
12850+ENDPROC(ret_from_exception)
12851
12852 #ifdef CONFIG_PREEMPT
12853 ENTRY(resume_kernel)
12854@@ -361,7 +511,7 @@ need_resched:
12855 jz restore_all
12856 call preempt_schedule_irq
12857 jmp need_resched
12858-END(resume_kernel)
12859+ENDPROC(resume_kernel)
12860 #endif
12861 CFI_ENDPROC
12862 /*
12863@@ -395,23 +545,34 @@ sysenter_past_esp:
12864 /*CFI_REL_OFFSET cs, 0*/
12865 /*
12866 * Push current_thread_info()->sysenter_return to the stack.
12867- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12868- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12869 */
12870- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12871+ pushl_cfi $0
12872 CFI_REL_OFFSET eip, 0
12873
12874 pushl_cfi %eax
12875 SAVE_ALL
12876+ GET_THREAD_INFO(%ebp)
12877+ movl TI_sysenter_return(%ebp),%ebp
12878+ movl %ebp,PT_EIP(%esp)
12879 ENABLE_INTERRUPTS(CLBR_NONE)
12880
12881 /*
12882 * Load the potential sixth argument from user stack.
12883 * Careful about security.
12884 */
12885+ movl PT_OLDESP(%esp),%ebp
12886+
12887+#ifdef CONFIG_PAX_MEMORY_UDEREF
12888+ mov PT_OLDSS(%esp),%ds
12889+1: movl %ds:(%ebp),%ebp
12890+ push %ss
12891+ pop %ds
12892+#else
12893 cmpl $__PAGE_OFFSET-3,%ebp
12894 jae syscall_fault
12895 1: movl (%ebp),%ebp
12896+#endif
12897+
12898 movl %ebp,PT_EBP(%esp)
12899 .section __ex_table,"a"
12900 .align 4
12901@@ -434,12 +595,24 @@ sysenter_do_call:
12902 testl $_TIF_ALLWORK_MASK, %ecx
12903 jne sysexit_audit
12904 sysenter_exit:
12905+
12906+#ifdef CONFIG_PAX_RANDKSTACK
12907+ pushl_cfi %eax
12908+ movl %esp, %eax
12909+ call pax_randomize_kstack
12910+ popl_cfi %eax
12911+#endif
12912+
12913+ pax_erase_kstack
12914+
12915 /* if something modifies registers it must also disable sysexit */
12916 movl PT_EIP(%esp), %edx
12917 movl PT_OLDESP(%esp), %ecx
12918 xorl %ebp,%ebp
12919 TRACE_IRQS_ON
12920 1: mov PT_FS(%esp), %fs
12921+2: mov PT_DS(%esp), %ds
12922+3: mov PT_ES(%esp), %es
12923 PTGS_TO_GS
12924 ENABLE_INTERRUPTS_SYSEXIT
12925
12926@@ -456,6 +629,9 @@ sysenter_audit:
12927 movl %eax,%edx /* 2nd arg: syscall number */
12928 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12929 call audit_syscall_entry
12930+
12931+ pax_erase_kstack
12932+
12933 pushl_cfi %ebx
12934 movl PT_EAX(%esp),%eax /* reload syscall number */
12935 jmp sysenter_do_call
12936@@ -482,11 +658,17 @@ sysexit_audit:
12937
12938 CFI_ENDPROC
12939 .pushsection .fixup,"ax"
12940-2: movl $0,PT_FS(%esp)
12941+4: movl $0,PT_FS(%esp)
12942+ jmp 1b
12943+5: movl $0,PT_DS(%esp)
12944+ jmp 1b
12945+6: movl $0,PT_ES(%esp)
12946 jmp 1b
12947 .section __ex_table,"a"
12948 .align 4
12949- .long 1b,2b
12950+ .long 1b,4b
12951+ .long 2b,5b
12952+ .long 3b,6b
12953 .popsection
12954 PTGS_TO_GS_EX
12955 ENDPROC(ia32_sysenter_target)
12956@@ -519,6 +701,15 @@ syscall_exit:
12957 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12958 jne syscall_exit_work
12959
12960+restore_all_pax:
12961+
12962+#ifdef CONFIG_PAX_RANDKSTACK
12963+ movl %esp, %eax
12964+ call pax_randomize_kstack
12965+#endif
12966+
12967+ pax_erase_kstack
12968+
12969 restore_all:
12970 TRACE_IRQS_IRET
12971 restore_all_notrace:
12972@@ -578,14 +769,34 @@ ldt_ss:
12973 * compensating for the offset by changing to the ESPFIX segment with
12974 * a base address that matches for the difference.
12975 */
12976-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
12977+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
12978 mov %esp, %edx /* load kernel esp */
12979 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12980 mov %dx, %ax /* eax: new kernel esp */
12981 sub %eax, %edx /* offset (low word is 0) */
12982+#ifdef CONFIG_SMP
12983+ movl PER_CPU_VAR(cpu_number), %ebx
12984+ shll $PAGE_SHIFT_asm, %ebx
12985+ addl $cpu_gdt_table, %ebx
12986+#else
12987+ movl $cpu_gdt_table, %ebx
12988+#endif
12989 shr $16, %edx
12990- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
12991- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
12992+
12993+#ifdef CONFIG_PAX_KERNEXEC
12994+ mov %cr0, %esi
12995+ btr $16, %esi
12996+ mov %esi, %cr0
12997+#endif
12998+
12999+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13000+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13001+
13002+#ifdef CONFIG_PAX_KERNEXEC
13003+ bts $16, %esi
13004+ mov %esi, %cr0
13005+#endif
13006+
13007 pushl_cfi $__ESPFIX_SS
13008 pushl_cfi %eax /* new kernel esp */
13009 /* Disable interrupts, but do not irqtrace this section: we
13010@@ -614,34 +825,28 @@ work_resched:
13011 movl TI_flags(%ebp), %ecx
13012 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13013 # than syscall tracing?
13014- jz restore_all
13015+ jz restore_all_pax
13016 testb $_TIF_NEED_RESCHED, %cl
13017 jnz work_resched
13018
13019 work_notifysig: # deal with pending signals and
13020 # notify-resume requests
13021+ movl %esp, %eax
13022 #ifdef CONFIG_VM86
13023 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13024- movl %esp, %eax
13025- jne work_notifysig_v86 # returning to kernel-space or
13026+ jz 1f # returning to kernel-space or
13027 # vm86-space
13028- xorl %edx, %edx
13029- call do_notify_resume
13030- jmp resume_userspace_sig
13031
13032- ALIGN
13033-work_notifysig_v86:
13034 pushl_cfi %ecx # save ti_flags for do_notify_resume
13035 call save_v86_state # %eax contains pt_regs pointer
13036 popl_cfi %ecx
13037 movl %eax, %esp
13038-#else
13039- movl %esp, %eax
13040+1:
13041 #endif
13042 xorl %edx, %edx
13043 call do_notify_resume
13044 jmp resume_userspace_sig
13045-END(work_pending)
13046+ENDPROC(work_pending)
13047
13048 # perform syscall exit tracing
13049 ALIGN
13050@@ -649,11 +854,14 @@ syscall_trace_entry:
13051 movl $-ENOSYS,PT_EAX(%esp)
13052 movl %esp, %eax
13053 call syscall_trace_enter
13054+
13055+ pax_erase_kstack
13056+
13057 /* What it returned is what we'll actually use. */
13058 cmpl $(nr_syscalls), %eax
13059 jnae syscall_call
13060 jmp syscall_exit
13061-END(syscall_trace_entry)
13062+ENDPROC(syscall_trace_entry)
13063
13064 # perform syscall exit tracing
13065 ALIGN
13066@@ -666,20 +874,24 @@ syscall_exit_work:
13067 movl %esp, %eax
13068 call syscall_trace_leave
13069 jmp resume_userspace
13070-END(syscall_exit_work)
13071+ENDPROC(syscall_exit_work)
13072 CFI_ENDPROC
58c5fc13
MT
13073
13074 RING0_INT_FRAME # can't unwind into user space anyway
13075 syscall_fault:
13076+#ifdef CONFIG_PAX_MEMORY_UDEREF
13077+ push %ss
13078+ pop %ds
13079+#endif
13080 GET_THREAD_INFO(%ebp)
13081 movl $-EFAULT,PT_EAX(%esp)
13082 jmp resume_userspace
13083-END(syscall_fault)
13084+ENDPROC(syscall_fault)
13085
13086 syscall_badsys:
13087 movl $-ENOSYS,PT_EAX(%esp)
13088 jmp resume_userspace
13089-END(syscall_badsys)
13090+ENDPROC(syscall_badsys)
13091 CFI_ENDPROC
13092 /*
13093 * End of kprobes section
13094@@ -753,6 +965,36 @@ ptregs_clone:
13095 CFI_ENDPROC
13096 ENDPROC(ptregs_clone)
13097
13098+ ALIGN;
13099+ENTRY(kernel_execve)
13100+ CFI_STARTPROC
13101+ pushl_cfi %ebp
13102+ sub $PT_OLDSS+4,%esp
13103+ pushl_cfi %edi
13104+ pushl_cfi %ecx
13105+ pushl_cfi %eax
13106+ lea 3*4(%esp),%edi
13107+ mov $PT_OLDSS/4+1,%ecx
13108+ xorl %eax,%eax
13109+ rep stosl
13110+ popl_cfi %eax
13111+ popl_cfi %ecx
13112+ popl_cfi %edi
13113+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13114+ pushl_cfi %esp
13115+ call sys_execve
13116+ add $4,%esp
13117+ CFI_ADJUST_CFA_OFFSET -4
13118+ GET_THREAD_INFO(%ebp)
13119+ test %eax,%eax
13120+ jz syscall_exit
13121+ add $PT_OLDSS+4,%esp
13122+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13123+ popl_cfi %ebp
13124+ ret
13125+ CFI_ENDPROC
13126+ENDPROC(kernel_execve)
13127+
13128 .macro FIXUP_ESPFIX_STACK
13129 /*
13130 * Switch back for ESPFIX stack to the normal zerobased stack
13131@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13132 * normal stack and adjusts ESP with the matching offset.
13133 */
13134 /* fixup the stack */
13135- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13136- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13137+#ifdef CONFIG_SMP
13138+ movl PER_CPU_VAR(cpu_number), %ebx
13139+ shll $PAGE_SHIFT_asm, %ebx
13140+ addl $cpu_gdt_table, %ebx
13141+#else
13142+ movl $cpu_gdt_table, %ebx
13143+#endif
13144+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13145+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13146 shl $16, %eax
13147 addl %esp, %eax /* the adjusted stack pointer */
13148 pushl_cfi $__KERNEL_DS
13149@@ -816,7 +1065,7 @@ vector=vector+1
13150 .endr
13151 2: jmp common_interrupt
13152 .endr
13153-END(irq_entries_start)
13154+ENDPROC(irq_entries_start)
13155
13156 .previous
13157 END(interrupt)
13158@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13159 pushl_cfi $do_coprocessor_error
13160 jmp error_code
13161 CFI_ENDPROC
13162-END(coprocessor_error)
13163+ENDPROC(coprocessor_error)
13164
13165 ENTRY(simd_coprocessor_error)
13166 RING0_INT_FRAME
13167@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13168 #endif
13169 jmp error_code
13170 CFI_ENDPROC
13171-END(simd_coprocessor_error)
13172+ENDPROC(simd_coprocessor_error)
13173
13174 ENTRY(device_not_available)
13175 RING0_INT_FRAME
13176@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13177 pushl_cfi $do_device_not_available
13178 jmp error_code
13179 CFI_ENDPROC
13180-END(device_not_available)
13181+ENDPROC(device_not_available)
13182
13183 #ifdef CONFIG_PARAVIRT
13184 ENTRY(native_iret)
13185@@ -902,12 +1151,12 @@ ENTRY(native_iret)
13186 .align 4
13187 .long native_iret, iret_exc
13188 .previous
13189-END(native_iret)
13190+ENDPROC(native_iret)
13191
13192 ENTRY(native_irq_enable_sysexit)
13193 sti
13194 sysexit
13195-END(native_irq_enable_sysexit)
13196+ENDPROC(native_irq_enable_sysexit)
13197 #endif
13198
13199 ENTRY(overflow)
13200@@ -916,7 +1165,7 @@ ENTRY(overflow)
13201 pushl_cfi $do_overflow
13202 jmp error_code
13203 CFI_ENDPROC
13204-END(overflow)
13205+ENDPROC(overflow)
13206
13207 ENTRY(bounds)
13208 RING0_INT_FRAME
13209@@ -924,7 +1173,7 @@ ENTRY(bounds)
13210 pushl_cfi $do_bounds
13211 jmp error_code
13212 CFI_ENDPROC
13213-END(bounds)
13214+ENDPROC(bounds)
13215
13216 ENTRY(invalid_op)
13217 RING0_INT_FRAME
13218@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13219 pushl_cfi $do_invalid_op
13220 jmp error_code
13221 CFI_ENDPROC
13222-END(invalid_op)
13223+ENDPROC(invalid_op)
13224
13225 ENTRY(coprocessor_segment_overrun)
13226 RING0_INT_FRAME
13227@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13228 pushl_cfi $do_coprocessor_segment_overrun
13229 jmp error_code
13230 CFI_ENDPROC
13231-END(coprocessor_segment_overrun)
13232+ENDPROC(coprocessor_segment_overrun)
13233
13234 ENTRY(invalid_TSS)
13235 RING0_EC_FRAME
13236 pushl_cfi $do_invalid_TSS
13237 jmp error_code
13238 CFI_ENDPROC
13239-END(invalid_TSS)
13240+ENDPROC(invalid_TSS)
13241
13242 ENTRY(segment_not_present)
13243 RING0_EC_FRAME
13244 pushl_cfi $do_segment_not_present
13245 jmp error_code
13246 CFI_ENDPROC
13247-END(segment_not_present)
13248+ENDPROC(segment_not_present)
13249
13250 ENTRY(stack_segment)
13251 RING0_EC_FRAME
13252 pushl_cfi $do_stack_segment
13253 jmp error_code
13254 CFI_ENDPROC
13255-END(stack_segment)
13256+ENDPROC(stack_segment)
13257
13258 ENTRY(alignment_check)
13259 RING0_EC_FRAME
13260 pushl_cfi $do_alignment_check
13261 jmp error_code
13262 CFI_ENDPROC
13263-END(alignment_check)
13264+ENDPROC(alignment_check)
13265
13266 ENTRY(divide_error)
13267 RING0_INT_FRAME
13268@@ -976,7 +1225,7 @@ ENTRY(divide_error)
13269 pushl_cfi $do_divide_error
13270 jmp error_code
13271 CFI_ENDPROC
13272-END(divide_error)
13273+ENDPROC(divide_error)
13274
13275 #ifdef CONFIG_X86_MCE
13276 ENTRY(machine_check)
13277@@ -985,7 +1234,7 @@ ENTRY(machine_check)
13278 pushl_cfi machine_check_vector
13279 jmp error_code
13280 CFI_ENDPROC
13281-END(machine_check)
13282+ENDPROC(machine_check)
13283 #endif
13284
13285 ENTRY(spurious_interrupt_bug)
13286@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13287 pushl_cfi $do_spurious_interrupt_bug
13288 jmp error_code
13289 CFI_ENDPROC
13290-END(spurious_interrupt_bug)
13291+ENDPROC(spurious_interrupt_bug)
13292 /*
13293 * End of kprobes section
13294 */
13295@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13296
13297 ENTRY(mcount)
13298 ret
13299-END(mcount)
13300+ENDPROC(mcount)
13301
13302 ENTRY(ftrace_caller)
13303 cmpl $0, function_trace_stop
13304@@ -1138,7 +1387,7 @@ ftrace_graph_call:
13305 .globl ftrace_stub
13306 ftrace_stub:
13307 ret
13308-END(ftrace_caller)
13309+ENDPROC(ftrace_caller)
13310
13311 #else /* ! CONFIG_DYNAMIC_FTRACE */
13312
13313@@ -1174,7 +1423,7 @@ trace:
13314 popl %ecx
13315 popl %eax
13316 jmp ftrace_stub
13317-END(mcount)
13318+ENDPROC(mcount)
13319 #endif /* CONFIG_DYNAMIC_FTRACE */
13320 #endif /* CONFIG_FUNCTION_TRACER */
13321
13322@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13323 popl %ecx
13324 popl %eax
13325 ret
13326-END(ftrace_graph_caller)
13327+ENDPROC(ftrace_graph_caller)
13328
13329 .globl return_to_handler
13330 return_to_handler:
13331@@ -1209,7 +1458,6 @@ return_to_handler:
13332 jmp *%ecx
13333 #endif
13334
13335-.section .rodata,"a"
13336 #include "syscall_table_32.S"
13337
13338 syscall_table_size=(.-sys_call_table)
13339@@ -1255,15 +1503,18 @@ error_code:
13340 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13341 REG_TO_PTGS %ecx
13342 SET_KERNEL_GS %ecx
13343- movl $(__USER_DS), %ecx
13344+ movl $(__KERNEL_DS), %ecx
13345 movl %ecx, %ds
13346 movl %ecx, %es
13347+
13348+ pax_enter_kernel
13349+
13350 TRACE_IRQS_OFF
13351 movl %esp,%eax # pt_regs pointer
13352 call *%edi
13353 jmp ret_from_exception
13354 CFI_ENDPROC
13355-END(page_fault)
13356+ENDPROC(page_fault)
13357
13358 /*
13359 * Debug traps and NMI can happen at the one SYSENTER instruction
13360@@ -1305,7 +1556,7 @@ debug_stack_correct:
13361 call do_debug
13362 jmp ret_from_exception
13363 CFI_ENDPROC
13364-END(debug)
13365+ENDPROC(debug)
13366
13367 /*
13368 * NMI is doubly nasty. It can happen _while_ we're handling
13369@@ -1342,6 +1593,9 @@ nmi_stack_correct:
13370 xorl %edx,%edx # zero error code
13371 movl %esp,%eax # pt_regs pointer
13372 call do_nmi
13373+
13374+ pax_exit_kernel
13375+
13376 jmp restore_all_notrace
13377 CFI_ENDPROC
13378
13379@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13380 FIXUP_ESPFIX_STACK # %eax == %esp
13381 xorl %edx,%edx # zero error code
13382 call do_nmi
13383+
13384+ pax_exit_kernel
13385+
13386 RESTORE_REGS
13387 lss 12+4(%esp), %esp # back to espfix stack
13388 CFI_ADJUST_CFA_OFFSET -24
13389 jmp irq_return
13390 CFI_ENDPROC
13391-END(nmi)
13392+ENDPROC(nmi)
13393
13394 ENTRY(int3)
13395 RING0_INT_FRAME
13396@@ -1395,14 +1652,14 @@ ENTRY(int3)
13397 call do_int3
13398 jmp ret_from_exception
13399 CFI_ENDPROC
13400-END(int3)
13401+ENDPROC(int3)
13402
13403 ENTRY(general_protection)
13404 RING0_EC_FRAME
13405 pushl_cfi $do_general_protection
13406 jmp error_code
13407 CFI_ENDPROC
13408-END(general_protection)
13409+ENDPROC(general_protection)
13410
13411 #ifdef CONFIG_KVM_GUEST
13412 ENTRY(async_page_fault)
13413@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13414 pushl_cfi $do_async_page_fault
13415 jmp error_code
13416 CFI_ENDPROC
13417-END(async_page_fault)
13418+ENDPROC(async_page_fault)
13419 #endif
13420
13421 /*
13422diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13423index 6419bb0..00440bf 100644
13424--- a/arch/x86/kernel/entry_64.S
13425+++ b/arch/x86/kernel/entry_64.S
13426@@ -55,6 +55,8 @@
13427 #include <asm/paravirt.h>
13428 #include <asm/ftrace.h>
13429 #include <asm/percpu.h>
13430+#include <asm/pgtable.h>
13431+#include <asm/alternative-asm.h>
13432
13433 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13434 #include <linux/elf-em.h>
13435@@ -68,8 +70,9 @@
13436 #ifdef CONFIG_FUNCTION_TRACER
13437 #ifdef CONFIG_DYNAMIC_FTRACE
13438 ENTRY(mcount)
13439+ pax_force_retaddr
13440 retq
13441-END(mcount)
13442+ENDPROC(mcount)
13443
13444 ENTRY(ftrace_caller)
13445 cmpl $0, function_trace_stop
13446@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13447 #endif
13448
13449 GLOBAL(ftrace_stub)
13450+ pax_force_retaddr
13451 retq
13452-END(ftrace_caller)
13453+ENDPROC(ftrace_caller)
13454
13455 #else /* ! CONFIG_DYNAMIC_FTRACE */
13456 ENTRY(mcount)
13457@@ -112,6 +116,7 @@ ENTRY(mcount)
13458 #endif
13459
13460 GLOBAL(ftrace_stub)
13461+ pax_force_retaddr
13462 retq
13463
13464 trace:
13465@@ -121,12 +126,13 @@ trace:
13466 movq 8(%rbp), %rsi
13467 subq $MCOUNT_INSN_SIZE, %rdi
13468
13469+ pax_force_fptr ftrace_trace_function
13470 call *ftrace_trace_function
13471
13472 MCOUNT_RESTORE_FRAME
13473
13474 jmp ftrace_stub
13475-END(mcount)
13476+ENDPROC(mcount)
13477 #endif /* CONFIG_DYNAMIC_FTRACE */
13478 #endif /* CONFIG_FUNCTION_TRACER */
13479
13480@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13481
13482 MCOUNT_RESTORE_FRAME
13483
13484+ pax_force_retaddr
13485 retq
13486-END(ftrace_graph_caller)
13487+ENDPROC(ftrace_graph_caller)
13488
13489 GLOBAL(return_to_handler)
13490 subq $24, %rsp
13491@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13492 movq 8(%rsp), %rdx
13493 movq (%rsp), %rax
13494 addq $24, %rsp
13495+ pax_force_fptr %rdi
13496 jmp *%rdi
13497 #endif
13498
13499@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13500 ENDPROC(native_usergs_sysret64)
13501 #endif /* CONFIG_PARAVIRT */
13502
13503+ .macro ljmpq sel, off
13504+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13505+ .byte 0x48; ljmp *1234f(%rip)
13506+ .pushsection .rodata
13507+ .align 16
13508+ 1234: .quad \off; .word \sel
13509+ .popsection
13510+#else
13511+ pushq $\sel
13512+ pushq $\off
13513+ lretq
13514+#endif
13515+ .endm
13516+
13517+ .macro pax_enter_kernel
13518+ pax_set_fptr_mask
13519+#ifdef CONFIG_PAX_KERNEXEC
13520+ call pax_enter_kernel
13521+#endif
13522+ .endm
13523+
13524+ .macro pax_exit_kernel
13525+#ifdef CONFIG_PAX_KERNEXEC
13526+ call pax_exit_kernel
13527+#endif
13528+ .endm
13529+
13530+#ifdef CONFIG_PAX_KERNEXEC
13531+ENTRY(pax_enter_kernel)
13532+ pushq %rdi
13533+
13534+#ifdef CONFIG_PARAVIRT
13535+ PV_SAVE_REGS(CLBR_RDI)
13536+#endif
13537+
13538+ GET_CR0_INTO_RDI
13539+ bts $16,%rdi
13540+ jnc 3f
13541+ mov %cs,%edi
13542+ cmp $__KERNEL_CS,%edi
13543+ jnz 2f
13544+1:
13545+
13546+#ifdef CONFIG_PARAVIRT
13547+ PV_RESTORE_REGS(CLBR_RDI)
13548+#endif
13549+
13550+ popq %rdi
13551+ pax_force_retaddr
13552+ retq
13553+
13554+2: ljmpq __KERNEL_CS,1f
13555+3: ljmpq __KERNEXEC_KERNEL_CS,4f
13556+4: SET_RDI_INTO_CR0
13557+ jmp 1b
13558+ENDPROC(pax_enter_kernel)
13559+
13560+ENTRY(pax_exit_kernel)
13561+ pushq %rdi
13562+
13563+#ifdef CONFIG_PARAVIRT
13564+ PV_SAVE_REGS(CLBR_RDI)
13565+#endif
13566+
13567+ mov %cs,%rdi
13568+ cmp $__KERNEXEC_KERNEL_CS,%edi
13569+ jz 2f
13570+1:
13571+
13572+#ifdef CONFIG_PARAVIRT
13573+ PV_RESTORE_REGS(CLBR_RDI);
13574+#endif
13575+
13576+ popq %rdi
13577+ pax_force_retaddr
13578+ retq
13579+
13580+2: GET_CR0_INTO_RDI
13581+ btr $16,%rdi
13582+ ljmpq __KERNEL_CS,3f
13583+3: SET_RDI_INTO_CR0
13584+ jmp 1b
13585+#ifdef CONFIG_PARAVIRT
13586+ PV_RESTORE_REGS(CLBR_RDI);
13587+#endif
13588+
13589+ popq %rdi
13590+ pax_force_retaddr
13591+ retq
13592+ENDPROC(pax_exit_kernel)
13593+#endif
13594+
13595+ .macro pax_enter_kernel_user
13596+ pax_set_fptr_mask
13597+#ifdef CONFIG_PAX_MEMORY_UDEREF
13598+ call pax_enter_kernel_user
13599+#endif
13600+ .endm
13601+
13602+ .macro pax_exit_kernel_user
13603+#ifdef CONFIG_PAX_MEMORY_UDEREF
13604+ call pax_exit_kernel_user
13605+#endif
13606+#ifdef CONFIG_PAX_RANDKSTACK
13607+ push %rax
13608+ call pax_randomize_kstack
13609+ pop %rax
13610+#endif
13611+ .endm
13612+
13613+#ifdef CONFIG_PAX_MEMORY_UDEREF
13614+ENTRY(pax_enter_kernel_user)
13615+ pushq %rdi
13616+ pushq %rbx
13617+
13618+#ifdef CONFIG_PARAVIRT
13619+ PV_SAVE_REGS(CLBR_RDI)
13620+#endif
13621+
13622+ GET_CR3_INTO_RDI
13623+ mov %rdi,%rbx
13624+ add $__START_KERNEL_map,%rbx
13625+ sub phys_base(%rip),%rbx
13626+
13627+#ifdef CONFIG_PARAVIRT
13628+ pushq %rdi
13629+ cmpl $0, pv_info+PARAVIRT_enabled
13630+ jz 1f
13631+ i = 0
13632+ .rept USER_PGD_PTRS
13633+ mov i*8(%rbx),%rsi
13634+ mov $0,%sil
13635+ lea i*8(%rbx),%rdi
13636+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13637+ i = i + 1
13638+ .endr
13639+ jmp 2f
13640+1:
13641+#endif
13642+
13643+ i = 0
13644+ .rept USER_PGD_PTRS
13645+ movb $0,i*8(%rbx)
13646+ i = i + 1
13647+ .endr
13648+
13649+#ifdef CONFIG_PARAVIRT
13650+2: popq %rdi
13651+#endif
13652+ SET_RDI_INTO_CR3
13653+
13654+#ifdef CONFIG_PAX_KERNEXEC
13655+ GET_CR0_INTO_RDI
13656+ bts $16,%rdi
13657+ SET_RDI_INTO_CR0
13658+#endif
13659+
13660+#ifdef CONFIG_PARAVIRT
13661+ PV_RESTORE_REGS(CLBR_RDI)
13662+#endif
13663+
13664+ popq %rbx
13665+ popq %rdi
13666+ pax_force_retaddr
13667+ retq
13668+ENDPROC(pax_enter_kernel_user)
13669+
13670+ENTRY(pax_exit_kernel_user)
13671+ push %rdi
13672+
13673+#ifdef CONFIG_PARAVIRT
13674+ pushq %rbx
13675+ PV_SAVE_REGS(CLBR_RDI)
13676+#endif
13677+
13678+#ifdef CONFIG_PAX_KERNEXEC
13679+ GET_CR0_INTO_RDI
13680+ btr $16,%rdi
13681+ SET_RDI_INTO_CR0
13682+#endif
13683+
13684+ GET_CR3_INTO_RDI
13685+ add $__START_KERNEL_map,%rdi
13686+ sub phys_base(%rip),%rdi
13687+
13688+#ifdef CONFIG_PARAVIRT
13689+ cmpl $0, pv_info+PARAVIRT_enabled
13690+ jz 1f
13691+ mov %rdi,%rbx
13692+ i = 0
13693+ .rept USER_PGD_PTRS
13694+ mov i*8(%rbx),%rsi
13695+ mov $0x67,%sil
13696+ lea i*8(%rbx),%rdi
13697+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13698+ i = i + 1
13699+ .endr
13700+ jmp 2f
13701+1:
13702+#endif
13703+
13704+ i = 0
13705+ .rept USER_PGD_PTRS
13706+ movb $0x67,i*8(%rdi)
13707+ i = i + 1
13708+ .endr
13709+
13710+#ifdef CONFIG_PARAVIRT
13711+2: PV_RESTORE_REGS(CLBR_RDI)
13712+ popq %rbx
13713+#endif
13714+
13715+ popq %rdi
13716+ pax_force_retaddr
13717+ retq
13718+ENDPROC(pax_exit_kernel_user)
13719+#endif
13720+
13721+.macro pax_erase_kstack
13722+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13723+ call pax_erase_kstack
13724+#endif
13725+.endm
13726+
13727+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13728+/*
13729+ * r11: thread_info
13730+ * rcx, rdx: can be clobbered
13731+ */
13732+ENTRY(pax_erase_kstack)
13733+ pushq %rdi
13734+ pushq %rax
13735+ pushq %r11
13736+
13737+ GET_THREAD_INFO(%r11)
13738+ mov TI_lowest_stack(%r11), %rdi
13739+ mov $-0xBEEF, %rax
13740+ std
13741+
13742+1: mov %edi, %ecx
13743+ and $THREAD_SIZE_asm - 1, %ecx
13744+ shr $3, %ecx
13745+ repne scasq
13746+ jecxz 2f
13747+
13748+ cmp $2*8, %ecx
13749+ jc 2f
13750+
13751+ mov $2*8, %ecx
13752+ repe scasq
13753+ jecxz 2f
13754+ jne 1b
13755+
13756+2: cld
13757+ mov %esp, %ecx
13758+ sub %edi, %ecx
13759+
13760+ cmp $THREAD_SIZE_asm, %rcx
13761+ jb 3f
13762+ ud2
13763+3:
13764+
13765+ shr $3, %ecx
13766+ rep stosq
13767+
13768+ mov TI_task_thread_sp0(%r11), %rdi
13769+ sub $256, %rdi
13770+ mov %rdi, TI_lowest_stack(%r11)
13771+
13772+ popq %r11
13773+ popq %rax
13774+ popq %rdi
13775+ pax_force_retaddr
13776+ ret
13777+ENDPROC(pax_erase_kstack)
13778+#endif
13779
13780 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13781 #ifdef CONFIG_TRACE_IRQFLAGS
fe2de317
MT
13782@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13783 .endm
13784
13785 .macro UNFAKE_STACK_FRAME
13786- addq $8*6, %rsp
13787- CFI_ADJUST_CFA_OFFSET -(6*8)
13788+ addq $8*6 + ARG_SKIP, %rsp
13789+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13790 .endm
13791
13792 /*
13793@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
6e9df6a3
MT
13794 movq %rsp, %rsi
13795
13796 leaq -RBP(%rsp),%rdi /* arg1 for handler */
df50ba0c
MT
13797- testl $3, CS(%rdi)
13798+ testb $3, CS(%rdi)
13799 je 1f
13800 SWAPGS
13801 /*
fe2de317 13802@@ -350,9 +634,10 @@ ENTRY(save_rest)
6e9df6a3
MT
13803 movq_cfi r15, R15+16
13804 movq %r11, 8(%rsp) /* return address */
13805 FIXUP_TOP_OF_STACK %r11, 16
13806+ pax_force_retaddr
13807 ret
13808 CFI_ENDPROC
13809-END(save_rest)
13810+ENDPROC(save_rest)
13811
13812 /* save complete stack frame */
13813 .pushsection .kprobes.text, "ax"
fe2de317 13814@@ -381,9 +666,10 @@ ENTRY(save_paranoid)
6e9df6a3
MT
13815 js 1f /* negative -> in kernel */
13816 SWAPGS
13817 xorl %ebx,%ebx
13818-1: ret
fe2de317 13819+1: pax_force_retaddr_bts
6e9df6a3
MT
13820+ ret
13821 CFI_ENDPROC
13822-END(save_paranoid)
13823+ENDPROC(save_paranoid)
13824 .popsection
13825
13826 /*
fe2de317 13827@@ -405,7 +691,7 @@ ENTRY(ret_from_fork)
df50ba0c
MT
13828
13829 RESTORE_REST
13830
13831- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13832+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13833 je int_ret_from_sys_call
13834
13835 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
fe2de317 13836@@ -415,7 +701,7 @@ ENTRY(ret_from_fork)
6e9df6a3
MT
13837 jmp ret_from_sys_call # go to the SYSRET fastpath
13838
13839 CFI_ENDPROC
13840-END(ret_from_fork)
13841+ENDPROC(ret_from_fork)
13842
13843 /*
13844 * System call entry. Up to 6 arguments in registers are supported.
fe2de317 13845@@ -451,7 +737,7 @@ END(ret_from_fork)
71d190be
MT
13846 ENTRY(system_call)
13847 CFI_STARTPROC simple
13848 CFI_SIGNAL_FRAME
13849- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13850+ CFI_DEF_CFA rsp,0
13851 CFI_REGISTER rip,rcx
13852 /*CFI_REGISTER rflags,r11*/
13853 SWAPGS_UNSAFE_STACK
fe2de317 13854@@ -464,12 +750,13 @@ ENTRY(system_call_after_swapgs)
df50ba0c
MT
13855
13856 movq %rsp,PER_CPU_VAR(old_rsp)
13857 movq PER_CPU_VAR(kernel_stack),%rsp
fe2de317 13858+ SAVE_ARGS 8*6,0
317566c1 13859+ pax_enter_kernel_user
df50ba0c
MT
13860 /*
13861 * No need to follow this irqs off/on section - it's straight
13862 * and short:
71d190be
MT
13863 */
13864 ENABLE_INTERRUPTS(CLBR_NONE)
6e9df6a3 13865- SAVE_ARGS 8,0
71d190be
MT
13866 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13867 movq %rcx,RIP-ARGOFFSET(%rsp)
13868 CFI_REL_OFFSET rip,RIP-ARGOFFSET
fe2de317
MT
13869@@ -479,7 +766,7 @@ ENTRY(system_call_after_swapgs)
13870 system_call_fastpath:
13871 cmpq $__NR_syscall_max,%rax
13872 ja badsys
13873- movq %r10,%rcx
13874+ movq R10-ARGOFFSET(%rsp),%rcx
13875 call *sys_call_table(,%rax,8) # XXX: rip relative
13876 movq %rax,RAX-ARGOFFSET(%rsp)
13877 /*
13878@@ -498,6 +785,8 @@ sysret_check:
df50ba0c
MT
13879 andl %edi,%edx
13880 jnz sysret_careful
13881 CFI_REMEMBER_STATE
317566c1 13882+ pax_exit_kernel_user
15a11c5b 13883+ pax_erase_kstack
df50ba0c
MT
13884 /*
13885 * sysretq will re-enable interrupts:
13886 */
fe2de317
MT
13887@@ -549,14 +838,18 @@ badsys:
13888 * jump back to the normal fast path.
13889 */
13890 auditsys:
13891- movq %r10,%r9 /* 6th arg: 4th syscall arg */
13892+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13893 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13894 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13895 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
66a7e928
MT
13896 movq %rax,%rsi /* 2nd arg: syscall number */
13897 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13898 call audit_syscall_entry
13899+
13900+ pax_erase_kstack
13901+
13902 LOAD_ARGS 0 /* reload call-clobbered registers */
fe2de317 13903+ pax_set_fptr_mask
66a7e928
MT
13904 jmp system_call_fastpath
13905
fe2de317
MT
13906 /*
13907@@ -586,16 +879,20 @@ tracesys:
66a7e928
MT
13908 FIXUP_TOP_OF_STACK %rdi
13909 movq %rsp,%rdi
13910 call syscall_trace_enter
13911+
13912+ pax_erase_kstack
13913+
13914 /*
13915 * Reload arg registers from stack in case ptrace changed them.
13916 * We don't reload %rax because syscall_trace_enter() returned
fe2de317
MT
13917 * the value it wants us to use in the table lookup.
13918 */
13919 LOAD_ARGS ARGOFFSET, 1
13920+ pax_set_fptr_mask
13921 RESTORE_REST
13922 cmpq $__NR_syscall_max,%rax
13923 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13924- movq %r10,%rcx /* fixup for C */
13925+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13926 call *sys_call_table(,%rax,8)
13927 movq %rax,RAX-ARGOFFSET(%rsp)
13928 /* Use IRET because user could have changed frame */
13929@@ -607,7 +904,7 @@ tracesys:
df50ba0c
MT
13930 GLOBAL(int_ret_from_sys_call)
13931 DISABLE_INTERRUPTS(CLBR_NONE)
13932 TRACE_IRQS_OFF
13933- testl $3,CS-ARGOFFSET(%rsp)
13934+ testb $3,CS-ARGOFFSET(%rsp)
13935 je retint_restore_args
13936 movl $_TIF_ALLWORK_MASK,%edi
13937 /* edi: mask to check */
fe2de317 13938@@ -664,7 +961,7 @@ int_restore_rest:
6e9df6a3
MT
13939 TRACE_IRQS_OFF
13940 jmp int_with_check
13941 CFI_ENDPROC
13942-END(system_call)
13943+ENDPROC(system_call)
13944
13945 /*
13946 * Certain special system calls that need to save a complete full stack frame.
fe2de317 13947@@ -680,7 +977,7 @@ ENTRY(\label)
6e9df6a3
MT
13948 call \func
13949 jmp ptregscall_common
13950 CFI_ENDPROC
13951-END(\label)
13952+ENDPROC(\label)
13953 .endm
13954
13955 PTREGSCALL stub_clone, sys_clone, %r8
fe2de317 13956@@ -698,9 +995,10 @@ ENTRY(ptregscall_common)
6e9df6a3
MT
13957 movq_cfi_restore R12+8, r12
13958 movq_cfi_restore RBP+8, rbp
13959 movq_cfi_restore RBX+8, rbx
13960+ pax_force_retaddr
13961 ret $REST_SKIP /* pop extended registers */
13962 CFI_ENDPROC
13963-END(ptregscall_common)
13964+ENDPROC(ptregscall_common)
13965
13966 ENTRY(stub_execve)
13967 CFI_STARTPROC
fe2de317 13968@@ -715,7 +1013,7 @@ ENTRY(stub_execve)
6e9df6a3
MT
13969 RESTORE_REST
13970 jmp int_ret_from_sys_call
13971 CFI_ENDPROC
13972-END(stub_execve)
13973+ENDPROC(stub_execve)
13974
13975 /*
13976 * sigreturn is special because it needs to restore all registers on return.
fe2de317 13977@@ -733,7 +1031,7 @@ ENTRY(stub_rt_sigreturn)
6e9df6a3
MT
13978 RESTORE_REST
13979 jmp int_ret_from_sys_call
13980 CFI_ENDPROC
13981-END(stub_rt_sigreturn)
13982+ENDPROC(stub_rt_sigreturn)
13983
13984 /*
13985 * Build the entry stubs and pointer table with some assembler magic.
fe2de317 13986@@ -768,7 +1066,7 @@ vector=vector+1
6e9df6a3
MT
13987 2: jmp common_interrupt
13988 .endr
13989 CFI_ENDPROC
13990-END(irq_entries_start)
13991+ENDPROC(irq_entries_start)
13992
13993 .previous
13994 END(interrupt)
fe2de317 13995@@ -789,6 +1087,16 @@ END(interrupt)
16454cff 13996 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
6e9df6a3 13997 SAVE_ARGS_IRQ
ae4e228f 13998 PARTIAL_FRAME 0
df50ba0c
MT
13999+#ifdef CONFIG_PAX_MEMORY_UDEREF
14000+ testb $3, CS(%rdi)
14001+ jnz 1f
317566c1 14002+ pax_enter_kernel
df50ba0c 14003+ jmp 2f
317566c1 14004+1: pax_enter_kernel_user
df50ba0c
MT
14005+2:
14006+#else
317566c1 14007+ pax_enter_kernel
df50ba0c 14008+#endif
ae4e228f
MT
14009 call \func
14010 .endm
14011
fe2de317 14012@@ -820,7 +1128,7 @@ ret_from_intr:
6e9df6a3 14013
ae4e228f 14014 exit_intr:
ae4e228f 14015 GET_THREAD_INFO(%rcx)
df50ba0c
MT
14016- testl $3,CS-ARGOFFSET(%rsp)
14017+ testb $3,CS-ARGOFFSET(%rsp)
ae4e228f 14018 je retint_kernel
df50ba0c
MT
14019
14020 /* Interrupt came from user space */
fe2de317 14021@@ -842,12 +1150,16 @@ retint_swapgs: /* return to user-space */
df50ba0c
MT
14022 * The iretq could re-enable interrupts:
14023 */
14024 DISABLE_INTERRUPTS(CLBR_ANY)
317566c1 14025+ pax_exit_kernel_user
15a11c5b 14026+ pax_erase_kstack
df50ba0c
MT
14027 TRACE_IRQS_IRETQ
14028 SWAPGS
14029 jmp restore_args
14030
14031 retint_restore_args: /* return to kernel space */
14032 DISABLE_INTERRUPTS(CLBR_ANY)
317566c1 14033+ pax_exit_kernel
6e9df6a3 14034+ pax_force_retaddr RIP-ARGOFFSET
df50ba0c
MT
14035 /*
14036 * The iretq could re-enable interrupts:
14037 */
fe2de317 14038@@ -936,7 +1248,7 @@ ENTRY(retint_kernel)
6e9df6a3
MT
14039 #endif
14040
14041 CFI_ENDPROC
14042-END(common_interrupt)
14043+ENDPROC(common_interrupt)
14044 /*
14045 * End of kprobes section
14046 */
fe2de317 14047@@ -952,7 +1264,7 @@ ENTRY(\sym)
6e9df6a3
MT
14048 interrupt \do_sym
14049 jmp ret_from_intr
14050 CFI_ENDPROC
14051-END(\sym)
14052+ENDPROC(\sym)
14053 .endm
14054
14055 #ifdef CONFIG_SMP
fe2de317 14056@@ -1017,12 +1329,22 @@ ENTRY(\sym)
bc901d79 14057 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
14058 call error_entry
14059 DEFAULT_FRAME 0
df50ba0c
MT
14060+#ifdef CONFIG_PAX_MEMORY_UDEREF
14061+ testb $3, CS(%rsp)
14062+ jnz 1f
317566c1 14063+ pax_enter_kernel
df50ba0c 14064+ jmp 2f
317566c1 14065+1: pax_enter_kernel_user
df50ba0c
MT
14066+2:
14067+#else
317566c1 14068+ pax_enter_kernel
df50ba0c 14069+#endif
ae4e228f
MT
14070 movq %rsp,%rdi /* pt_regs pointer */
14071 xorl %esi,%esi /* no error code */
14072 call \do_sym
6e9df6a3
MT
14073 jmp error_exit /* %ebx: no swapgs flag */
14074 CFI_ENDPROC
14075-END(\sym)
14076+ENDPROC(\sym)
14077 .endm
14078
14079 .macro paranoidzeroentry sym do_sym
fe2de317 14080@@ -1034,15 +1356,25 @@ ENTRY(\sym)
bc901d79 14081 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
14082 call save_paranoid
14083 TRACE_IRQS_OFF
df50ba0c
MT
14084+#ifdef CONFIG_PAX_MEMORY_UDEREF
14085+ testb $3, CS(%rsp)
14086+ jnz 1f
317566c1 14087+ pax_enter_kernel
df50ba0c 14088+ jmp 2f
317566c1 14089+1: pax_enter_kernel_user
df50ba0c
MT
14090+2:
14091+#else
317566c1 14092+ pax_enter_kernel
df50ba0c 14093+#endif
ae4e228f
MT
14094 movq %rsp,%rdi /* pt_regs pointer */
14095 xorl %esi,%esi /* no error code */
14096 call \do_sym
6e9df6a3
MT
14097 jmp paranoid_exit /* %ebx: no swapgs flag */
14098 CFI_ENDPROC
14099-END(\sym)
14100+ENDPROC(\sym)
6892158b
MT
14101 .endm
14102
14103-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14104+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14105 .macro paranoidzeroentry_ist sym do_sym ist
14106 ENTRY(\sym)
14107 INTR_FRAME
fe2de317 14108@@ -1052,14 +1384,30 @@ ENTRY(\sym)
bc901d79 14109 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f 14110 call save_paranoid
58c5fc13 14111 TRACE_IRQS_OFF
df50ba0c
MT
14112+#ifdef CONFIG_PAX_MEMORY_UDEREF
14113+ testb $3, CS(%rsp)
14114+ jnz 1f
317566c1 14115+ pax_enter_kernel
df50ba0c 14116+ jmp 2f
317566c1 14117+1: pax_enter_kernel_user
df50ba0c
MT
14118+2:
14119+#else
317566c1 14120+ pax_enter_kernel
df50ba0c 14121+#endif
58c5fc13
MT
14122 movq %rsp,%rdi /* pt_regs pointer */
14123 xorl %esi,%esi /* no error code */
58c5fc13 14124+#ifdef CONFIG_SMP
ae4e228f
MT
14125+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14126+ lea init_tss(%r12), %r12
58c5fc13 14127+#else
ae4e228f 14128+ lea init_tss(%rip), %r12
58c5fc13 14129+#endif
6892158b 14130 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
58c5fc13 14131 call \do_sym
6892158b 14132 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
6e9df6a3
MT
14133 jmp paranoid_exit /* %ebx: no swapgs flag */
14134 CFI_ENDPROC
14135-END(\sym)
14136+ENDPROC(\sym)
14137 .endm
14138
14139 .macro errorentry sym do_sym
fe2de317 14140@@ -1070,13 +1418,23 @@ ENTRY(\sym)
bc901d79 14141 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
14142 call error_entry
14143 DEFAULT_FRAME 0
df50ba0c
MT
14144+#ifdef CONFIG_PAX_MEMORY_UDEREF
14145+ testb $3, CS(%rsp)
14146+ jnz 1f
317566c1 14147+ pax_enter_kernel
df50ba0c 14148+ jmp 2f
317566c1 14149+1: pax_enter_kernel_user
df50ba0c
MT
14150+2:
14151+#else
317566c1 14152+ pax_enter_kernel
df50ba0c 14153+#endif
ae4e228f
MT
14154 movq %rsp,%rdi /* pt_regs pointer */
14155 movq ORIG_RAX(%rsp),%rsi /* get error code */
14156 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
6e9df6a3
MT
14157 call \do_sym
14158 jmp error_exit /* %ebx: no swapgs flag */
14159 CFI_ENDPROC
14160-END(\sym)
14161+ENDPROC(\sym)
14162 .endm
14163
14164 /* error code is on the stack already */
fe2de317 14165@@ -1089,13 +1447,23 @@ ENTRY(\sym)
ae4e228f
MT
14166 call save_paranoid
14167 DEFAULT_FRAME 0
14168 TRACE_IRQS_OFF
df50ba0c
MT
14169+#ifdef CONFIG_PAX_MEMORY_UDEREF
14170+ testb $3, CS(%rsp)
14171+ jnz 1f
317566c1 14172+ pax_enter_kernel
df50ba0c 14173+ jmp 2f
317566c1 14174+1: pax_enter_kernel_user
df50ba0c
MT
14175+2:
14176+#else
317566c1 14177+ pax_enter_kernel
df50ba0c 14178+#endif
ae4e228f
MT
14179 movq %rsp,%rdi /* pt_regs pointer */
14180 movq ORIG_RAX(%rsp),%rsi /* get error code */
14181 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
6e9df6a3
MT
14182 call \do_sym
14183 jmp paranoid_exit /* %ebx: no swapgs flag */
14184 CFI_ENDPROC
14185-END(\sym)
14186+ENDPROC(\sym)
14187 .endm
14188
14189 zeroentry divide_error do_divide_error
fe2de317 14190@@ -1125,9 +1493,10 @@ gs_change:
6e9df6a3
MT
14191 2: mfence /* workaround */
14192 SWAPGS
14193 popfq_cfi
14194+ pax_force_retaddr
14195 ret
14196 CFI_ENDPROC
14197-END(native_load_gs_index)
14198+ENDPROC(native_load_gs_index)
14199
14200 .section __ex_table,"a"
14201 .align 8
fe2de317 14202@@ -1149,13 +1518,14 @@ ENTRY(kernel_thread_helper)
6e9df6a3
MT
14203 * Here we are in the child and the registers are set as they were
14204 * at kernel_thread() invocation in the parent.
14205 */
14206+ pax_force_fptr %rsi
14207 call *%rsi
14208 # exit
14209 mov %eax, %edi
14210 call do_exit
14211 ud2 # padding for call trace
14212 CFI_ENDPROC
14213-END(kernel_thread_helper)
14214+ENDPROC(kernel_thread_helper)
14215
14216 /*
14217 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
fe2de317
MT
14218@@ -1182,11 +1552,11 @@ ENTRY(kernel_execve)
14219 RESTORE_REST
14220 testq %rax,%rax
6e9df6a3 14221 je int_ret_from_sys_call
fe2de317 14222- RESTORE_ARGS
6e9df6a3
MT
14223 UNFAKE_STACK_FRAME
14224+ pax_force_retaddr
14225 ret
14226 CFI_ENDPROC
14227-END(kernel_execve)
14228+ENDPROC(kernel_execve)
14229
14230 /* Call softirq on interrupt stack. Interrupts are off. */
14231 ENTRY(call_softirq)
fe2de317 14232@@ -1204,9 +1574,10 @@ ENTRY(call_softirq)
6e9df6a3
MT
14233 CFI_DEF_CFA_REGISTER rsp
14234 CFI_ADJUST_CFA_OFFSET -8
14235 decl PER_CPU_VAR(irq_count)
14236+ pax_force_retaddr
14237 ret
14238 CFI_ENDPROC
14239-END(call_softirq)
14240+ENDPROC(call_softirq)
14241
14242 #ifdef CONFIG_XEN
14243 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
fe2de317 14244@@ -1244,7 +1615,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
6e9df6a3
MT
14245 decl PER_CPU_VAR(irq_count)
14246 jmp error_exit
14247 CFI_ENDPROC
14248-END(xen_do_hypervisor_callback)
14249+ENDPROC(xen_do_hypervisor_callback)
14250
14251 /*
14252 * Hypervisor uses this for application faults while it executes.
fe2de317 14253@@ -1303,7 +1674,7 @@ ENTRY(xen_failsafe_callback)
6e9df6a3
MT
14254 SAVE_ALL
14255 jmp error_exit
14256 CFI_ENDPROC
14257-END(xen_failsafe_callback)
14258+ENDPROC(xen_failsafe_callback)
14259
14260 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14261 xen_hvm_callback_vector xen_evtchn_do_upcall
fe2de317 14262@@ -1352,16 +1723,31 @@ ENTRY(paranoid_exit)
df50ba0c
MT
14263 TRACE_IRQS_OFF
14264 testl %ebx,%ebx /* swapgs needed? */
14265 jnz paranoid_restore
14266- testl $3,CS(%rsp)
14267+ testb $3,CS(%rsp)
ae4e228f 14268 jnz paranoid_userspace
df50ba0c 14269+#ifdef CONFIG_PAX_MEMORY_UDEREF
317566c1 14270+ pax_exit_kernel
df50ba0c
MT
14271+ TRACE_IRQS_IRETQ 0
14272+ SWAPGS_UNSAFE_STACK
14273+ RESTORE_ALL 8
fe2de317 14274+ pax_force_retaddr_bts
df50ba0c
MT
14275+ jmp irq_return
14276+#endif
ae4e228f 14277 paranoid_swapgs:
df50ba0c 14278+#ifdef CONFIG_PAX_MEMORY_UDEREF
317566c1 14279+ pax_exit_kernel_user
df50ba0c 14280+#else
317566c1 14281+ pax_exit_kernel
df50ba0c 14282+#endif
ae4e228f
MT
14283 TRACE_IRQS_IRETQ 0
14284 SWAPGS_UNSAFE_STACK
14285 RESTORE_ALL 8
14286 jmp irq_return
14287 paranoid_restore:
317566c1 14288+ pax_exit_kernel
ae4e228f
MT
14289 TRACE_IRQS_IRETQ 0
14290 RESTORE_ALL 8
fe2de317 14291+ pax_force_retaddr_bts
ae4e228f 14292 jmp irq_return
15a11c5b
MT
14293 paranoid_userspace:
14294 GET_THREAD_INFO(%rcx)
fe2de317 14295@@ -1390,7 +1776,7 @@ paranoid_schedule:
6e9df6a3
MT
14296 TRACE_IRQS_OFF
14297 jmp paranoid_userspace
14298 CFI_ENDPROC
14299-END(paranoid_exit)
14300+ENDPROC(paranoid_exit)
14301
14302 /*
14303 * Exception entry point. This expects an error code/orig_rax on the stack.
fe2de317 14304@@ -1417,12 +1803,13 @@ ENTRY(error_entry)
df50ba0c
MT
14305 movq_cfi r14, R14+8
14306 movq_cfi r15, R15+8
14307 xorl %ebx,%ebx
14308- testl $3,CS+8(%rsp)
14309+ testb $3,CS+8(%rsp)
14310 je error_kernelspace
14311 error_swapgs:
14312 SWAPGS
6e9df6a3
MT
14313 error_sti:
14314 TRACE_IRQS_OFF
fe2de317 14315+ pax_force_retaddr_bts
6e9df6a3
MT
14316 ret
14317
14318 /*
fe2de317 14319@@ -1449,7 +1836,7 @@ bstep_iret:
6e9df6a3
MT
14320 movq %rcx,RIP+8(%rsp)
14321 jmp error_swapgs
14322 CFI_ENDPROC
14323-END(error_entry)
14324+ENDPROC(error_entry)
14325
14326
14327 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
fe2de317 14328@@ -1469,7 +1856,7 @@ ENTRY(error_exit)
6e9df6a3
MT
14329 jnz retint_careful
14330 jmp retint_swapgs
14331 CFI_ENDPROC
14332-END(error_exit)
14333+ENDPROC(error_exit)
14334
14335
14336 /* runs on exception stack */
fe2de317 14337@@ -1481,6 +1868,16 @@ ENTRY(nmi)
bc901d79 14338 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
14339 call save_paranoid
14340 DEFAULT_FRAME 0
df50ba0c
MT
14341+#ifdef CONFIG_PAX_MEMORY_UDEREF
14342+ testb $3, CS(%rsp)
14343+ jnz 1f
317566c1 14344+ pax_enter_kernel
df50ba0c 14345+ jmp 2f
317566c1 14346+1: pax_enter_kernel_user
df50ba0c
MT
14347+2:
14348+#else
317566c1 14349+ pax_enter_kernel
df50ba0c 14350+#endif
ae4e228f
MT
14351 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14352 movq %rsp,%rdi
14353 movq $-1,%rsi
fe2de317 14354@@ -1491,12 +1888,28 @@ ENTRY(nmi)
df50ba0c
MT
14355 DISABLE_INTERRUPTS(CLBR_NONE)
14356 testl %ebx,%ebx /* swapgs needed? */
14357 jnz nmi_restore
14358- testl $3,CS(%rsp)
14359+ testb $3,CS(%rsp)
14360 jnz nmi_userspace
317566c1
MT
14361+#ifdef CONFIG_PAX_MEMORY_UDEREF
14362+ pax_exit_kernel
14363+ SWAPGS_UNSAFE_STACK
14364+ RESTORE_ALL 8
fe2de317 14365+ pax_force_retaddr_bts
317566c1
MT
14366+ jmp irq_return
14367+#endif
ae4e228f 14368 nmi_swapgs:
317566c1
MT
14369+#ifdef CONFIG_PAX_MEMORY_UDEREF
14370+ pax_exit_kernel_user
14371+#else
14372+ pax_exit_kernel
14373+#endif
ae4e228f 14374 SWAPGS_UNSAFE_STACK
317566c1
MT
14375+ RESTORE_ALL 8
14376+ jmp irq_return
ae4e228f 14377 nmi_restore:
317566c1 14378+ pax_exit_kernel
ae4e228f 14379 RESTORE_ALL 8
fe2de317 14380+ pax_force_retaddr_bts
ae4e228f
MT
14381 jmp irq_return
14382 nmi_userspace:
15a11c5b 14383 GET_THREAD_INFO(%rcx)
fe2de317 14384@@ -1525,14 +1938,14 @@ nmi_schedule:
6e9df6a3
MT
14385 jmp paranoid_exit
14386 CFI_ENDPROC
14387 #endif
14388-END(nmi)
14389+ENDPROC(nmi)
14390
14391 ENTRY(ignore_sysret)
14392 CFI_STARTPROC
14393 mov $-ENOSYS,%eax
14394 sysret
14395 CFI_ENDPROC
14396-END(ignore_sysret)
14397+ENDPROC(ignore_sysret)
14398
14399 /*
14400 * End of kprobes section
fe2de317
MT
14401diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14402index c9a281f..ce2f317 100644
14403--- a/arch/x86/kernel/ftrace.c
14404+++ b/arch/x86/kernel/ftrace.c
14405@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
15a11c5b 14406 static const void *mod_code_newcode; /* holds the text to write to the IP */
8308f9c9
MT
14407
14408 static unsigned nmi_wait_count;
14409-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14410+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14411
14412 int ftrace_arch_read_dyn_info(char *buf, int size)
14413 {
fe2de317 14414@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
8308f9c9
MT
14415
14416 r = snprintf(buf, size, "%u %u",
14417 nmi_wait_count,
14418- atomic_read(&nmi_update_count));
14419+ atomic_read_unchecked(&nmi_update_count));
14420 return r;
14421 }
14422
14423@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
df50ba0c 14424
ae4e228f
MT
14425 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14426 smp_rmb();
14427+ pax_open_kernel();
14428 ftrace_mod_code();
8308f9c9 14429- atomic_inc(&nmi_update_count);
ae4e228f 14430+ pax_close_kernel();
8308f9c9 14431+ atomic_inc_unchecked(&nmi_update_count);
ae4e228f
MT
14432 }
14433 /* Must have previous changes seen before executions */
8308f9c9 14434 smp_mb();
fe2de317 14435@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
ae4e228f
MT
14436 {
14437 unsigned char replaced[MCOUNT_INSN_SIZE];
14438
14439+ ip = ktla_ktva(ip);
14440+
14441 /*
14442 * Note: Due to modules and __init, code can
14443 * disappear and change, we need to protect against faulting
fe2de317 14444@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
58c5fc13
MT
14445 unsigned char old[MCOUNT_INSN_SIZE], *new;
14446 int ret;
14447
14448- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14449+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14450 new = ftrace_call_replace(ip, (unsigned long)func);
ae4e228f
MT
14451 ret = ftrace_modify_code(ip, old, new);
14452
fe2de317 14453@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
ae4e228f
MT
14454 {
14455 unsigned char code[MCOUNT_INSN_SIZE];
14456
14457+ ip = ktla_ktva(ip);
14458+
14459 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14460 return -EFAULT;
14461
fe2de317
MT
14462diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14463index 3bb0850..55a56f4 100644
14464--- a/arch/x86/kernel/head32.c
14465+++ b/arch/x86/kernel/head32.c
bc901d79 14466@@ -19,6 +19,7 @@
ae4e228f 14467 #include <asm/io_apic.h>
58c5fc13 14468 #include <asm/bios_ebda.h>
bc901d79 14469 #include <asm/tlbflush.h>
58c5fc13
MT
14470+#include <asm/boot.h>
14471
ae4e228f 14472 static void __init i386_default_early_setup(void)
58c5fc13 14473 {
15a11c5b 14474@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
66a7e928
MT
14475 {
14476 memblock_init();
58c5fc13 14477
bc901d79
MT
14478- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14479+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
58c5fc13
MT
14480
14481 #ifdef CONFIG_BLK_DEV_INITRD
14482 /* Reserve INITRD */
fe2de317
MT
14483diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14484index ce0be7c..c41476e 100644
14485--- a/arch/x86/kernel/head_32.S
14486+++ b/arch/x86/kernel/head_32.S
df50ba0c 14487@@ -25,6 +25,12 @@
58c5fc13
MT
14488 /* Physical address */
14489 #define pa(X) ((X) - __PAGE_OFFSET)
ae4e228f
MT
14490
14491+#ifdef CONFIG_PAX_KERNEXEC
14492+#define ta(X) (X)
14493+#else
14494+#define ta(X) ((X) - __PAGE_OFFSET)
14495+#endif
14496+
14497 /*
14498 * References to members of the new_cpu_data structure.
14499 */
df50ba0c 14500@@ -54,11 +60,7 @@
58c5fc13
MT
14501 * and small than max_low_pfn, otherwise will waste some page table entries
14502 */
14503
14504-#if PTRS_PER_PMD > 1
14505-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14506-#else
14507-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14508-#endif
14509+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14510
bc901d79
MT
14511 /* Number of possible pages in the lowmem region */
14512 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
fe2de317 14513@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
58c5fc13
MT
14514 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14515
14516 /*
14517+ * Real beginning of normal "text" segment
14518+ */
14519+ENTRY(stext)
14520+ENTRY(_stext)
14521+
14522+/*
14523 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14524 * %esi points to the real-mode code as a 32-bit pointer.
14525 * CS and DS must be 4 GB flat segments, but we don't depend on
bc901d79 14526@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
58c5fc13
MT
14527 * can.
14528 */
ae4e228f 14529 __HEAD
58c5fc13
MT
14530+
14531+#ifdef CONFIG_PAX_KERNEXEC
14532+ jmp startup_32
14533+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14534+.fill PAGE_SIZE-5,1,0xcc
14535+#endif
14536+
14537 ENTRY(startup_32)
16454cff
MT
14538 movl pa(stack_start),%ecx
14539
14540@@ -105,6 +120,57 @@ ENTRY(startup_32)
58c5fc13 14541 2:
16454cff 14542 leal -__PAGE_OFFSET(%ecx),%esp
58c5fc13
MT
14543
14544+#ifdef CONFIG_SMP
14545+ movl $pa(cpu_gdt_table),%edi
14546+ movl $__per_cpu_load,%eax
14547+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14548+ rorl $16,%eax
14549+ movb %al,__KERNEL_PERCPU + 4(%edi)
14550+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14551+ movl $__per_cpu_end - 1,%eax
ae4e228f 14552+ subl $__per_cpu_start,%eax
58c5fc13
MT
14553+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14554+#endif
14555+
14556+#ifdef CONFIG_PAX_MEMORY_UDEREF
14557+ movl $NR_CPUS,%ecx
14558+ movl $pa(cpu_gdt_table),%edi
14559+1:
14560+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
bc901d79
MT
14561+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14562+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
58c5fc13
MT
14563+ addl $PAGE_SIZE_asm,%edi
14564+ loop 1b
14565+#endif
14566+
14567+#ifdef CONFIG_PAX_KERNEXEC
14568+ movl $pa(boot_gdt),%edi
ae4e228f 14569+ movl $__LOAD_PHYSICAL_ADDR,%eax
58c5fc13
MT
14570+ movw %ax,__BOOT_CS + 2(%edi)
14571+ rorl $16,%eax
14572+ movb %al,__BOOT_CS + 4(%edi)
14573+ movb %ah,__BOOT_CS + 7(%edi)
14574+ rorl $16,%eax
14575+
ae4e228f
MT
14576+ ljmp $(__BOOT_CS),$1f
14577+1:
14578+
58c5fc13
MT
14579+ movl $NR_CPUS,%ecx
14580+ movl $pa(cpu_gdt_table),%edi
ae4e228f 14581+ addl $__PAGE_OFFSET,%eax
58c5fc13
MT
14582+1:
14583+ movw %ax,__KERNEL_CS + 2(%edi)
ae4e228f 14584+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
58c5fc13
MT
14585+ rorl $16,%eax
14586+ movb %al,__KERNEL_CS + 4(%edi)
ae4e228f 14587+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
58c5fc13 14588+ movb %ah,__KERNEL_CS + 7(%edi)
ae4e228f 14589+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
58c5fc13
MT
14590+ rorl $16,%eax
14591+ addl $PAGE_SIZE_asm,%edi
14592+ loop 1b
14593+#endif
14594+
14595 /*
14596 * Clear BSS first so that there are no surprises...
14597 */
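
The UDEREF loop above rewrites the high dword of the kernel and default user descriptors in every per-CPU GDT page: the kernel data segment becomes an expand-down segment whose valid offsets start at __PAGE_OFFSET, while the user code/data segments are clipped to end just below it, the idea being that a stray kernel dereference of a userland pointer through %ds/%es now faults on the segment limit. A rough userspace sketch of the limit arithmetic, assuming the usual __PAGE_OFFSET of 0xC0000000 and the untouched 0x0000ffff low descriptor dword:

/* Rough sketch of the limit arithmetic behind the UDEREF GDT patch-up above.
 * __PAGE_OFFSET and the low descriptor dword are assumptions (the usual
 * 3G/1G layout), not values read from this tree. */
#include <stdio.h>

#define __PAGE_OFFSET 0xC0000000UL

int main(void)
{
	/* high dwords exactly as written by the loop above */
	unsigned long kernel_ds = ((((__PAGE_OFFSET - 1) & 0xf0000000UL) >> 12) | 0x00c09700UL);
	unsigned long user_cs   = ((((__PAGE_OFFSET - 1) & 0xf0000000UL) >> 12) | 0x00c0fb00UL);

	/* limit[19:16] sits in bits 16-19 of the high dword; limit[15:0] stays
	 * 0xffff from the untouched low dword; G=1 makes it page granular.    */
	unsigned long limit_kds = ((kernel_ds >> 16) & 0xf) << 16 | 0xffff;
	unsigned long limit_ucs = ((user_cs   >> 16) & 0xf) << 16 | 0xffff;

	/* type 0x97 = expand-down data: valid offsets are limit+1 .. 4G-1      */
	printf("__KERNEL_DS covers %#lx .. 0xffffffff\n", (limit_kds + 1) * 4096);
	/* type 0xfb/0xf3 = ordinary code/data: valid offsets are 0 .. limit    */
	printf("__USER_CS/DS cover 0x0 .. %#lx\n", (limit_ucs + 1) * 4096 - 1);
	return 0;
}
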
16454cff 14598@@ -195,8 +261,11 @@ ENTRY(startup_32)
58c5fc13
MT
14599 movl %eax, pa(max_pfn_mapped)
14600
14601 /* Do early initialization of the fixmap area */
bc901d79
MT
14602- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14603- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
58c5fc13 14604+#ifdef CONFIG_COMPAT_VDSO
bc901d79 14605+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
58c5fc13 14606+#else
bc901d79 14607+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
58c5fc13
MT
14608+#endif
14609 #else /* Not PAE */
14610
14611 page_pde_offset = (__PAGE_OFFSET >> 20);
16454cff 14612@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
58c5fc13
MT
14613 movl %eax, pa(max_pfn_mapped)
14614
14615 /* Do early initialization of the fixmap area */
bc901d79
MT
14616- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14617- movl %eax,pa(initial_page_table+0xffc)
58c5fc13 14618+#ifdef CONFIG_COMPAT_VDSO
bc901d79 14619+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
58c5fc13 14620+#else
bc901d79 14621+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
58c5fc13
MT
14622+#endif
14623 #endif
16454cff
MT
14624
14625 #ifdef CONFIG_PARAVIRT
14626@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14627 cmpl $num_subarch_entries, %eax
14628 jae bad_subarch
14629
14630- movl pa(subarch_entries)(,%eax,4), %eax
14631- subl $__PAGE_OFFSET, %eax
14632- jmp *%eax
14633+ jmp *pa(subarch_entries)(,%eax,4)
14634
14635 bad_subarch:
14636 WEAK(lguest_entry)
14637@@ -255,10 +325,10 @@ WEAK(xen_entry)
14638 __INITDATA
14639
14640 subarch_entries:
14641- .long default_entry /* normal x86/PC */
14642- .long lguest_entry /* lguest hypervisor */
14643- .long xen_entry /* Xen hypervisor */
14644- .long default_entry /* Moorestown MID */
66a7e928
MT
14645+ .long ta(default_entry) /* normal x86/PC */
14646+ .long ta(lguest_entry) /* lguest hypervisor */
14647+ .long ta(xen_entry) /* Xen hypervisor */
14648+ .long ta(default_entry) /* Moorestown MID */
16454cff
MT
14649 num_subarch_entries = (. - subarch_entries) / 4
14650 .previous
14651 #else
14652@@ -312,6 +382,7 @@ default_entry:
58c5fc13
MT
14653 orl %edx,%eax
14654 movl %eax,%cr4
14655
14656+#ifdef CONFIG_X86_PAE
ae4e228f
MT
14657 testb $X86_CR4_PAE, %al # check if PAE is enabled
14658 jz 6f
58c5fc13 14659
16454cff 14660@@ -340,6 +411,9 @@ default_entry:
58c5fc13
MT
14661 /* Make changes effective */
14662 wrmsr
14663
14664+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
58c5fc13 14665+#endif
ae4e228f 14666+
58c5fc13
MT
14667 6:
14668
14669 /*
16454cff 14670@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
58c5fc13
MT
14671 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14672 movl %eax,%ss # after changing gdt.
14673
14674- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14675+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14676 movl %eax,%ds
14677 movl %eax,%es
14678
16454cff 14679@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
58c5fc13
MT
14680 */
14681 cmpb $0,ready
14682 jne 1f
df50ba0c 14683- movl $gdt_page,%eax
58c5fc13 14684+ movl $cpu_gdt_table,%eax
df50ba0c 14685 movl $stack_canary,%ecx
58c5fc13
MT
14686+#ifdef CONFIG_SMP
14687+ addl $__per_cpu_load,%ecx
14688+#endif
14689 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14690 shrl $16, %ecx
14691 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
bc901d79
MT
14692 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14693 1:
14694-#endif
14695 movl $(__KERNEL_STACK_CANARY),%eax
14696+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14697+ movl $(__USER_DS),%eax
14698+#else
14699+ xorl %eax,%eax
14700+#endif
14701 movl %eax,%gs
14702
14703 xorl %eax,%eax # Clear LDT
16454cff 14704@@ -558,22 +639,22 @@ early_page_fault:
58c5fc13
MT
14705 jmp early_fault
14706
14707 early_fault:
14708- cld
14709 #ifdef CONFIG_PRINTK
14710+ cmpl $1,%ss:early_recursion_flag
14711+ je hlt_loop
14712+ incl %ss:early_recursion_flag
14713+ cld
14714 pusha
14715 movl $(__KERNEL_DS),%eax
14716 movl %eax,%ds
14717 movl %eax,%es
14718- cmpl $2,early_recursion_flag
14719- je hlt_loop
14720- incl early_recursion_flag
14721 movl %cr2,%eax
14722 pushl %eax
14723 pushl %edx /* trapno */
14724 pushl $fault_msg
14725 call printk
14726+; call dump_stack
14727 #endif
14728- call dump_stack
14729 hlt_loop:
14730 hlt
14731 jmp hlt_loop
16454cff 14732@@ -581,8 +662,11 @@ hlt_loop:
58c5fc13
MT
14733 /* This is the default interrupt "handler" :-) */
14734 ALIGN
14735 ignore_int:
14736- cld
14737 #ifdef CONFIG_PRINTK
14738+ cmpl $2,%ss:early_recursion_flag
14739+ je hlt_loop
14740+ incl %ss:early_recursion_flag
14741+ cld
14742 pushl %eax
14743 pushl %ecx
14744 pushl %edx
16454cff 14745@@ -591,9 +675,6 @@ ignore_int:
58c5fc13
MT
14746 movl $(__KERNEL_DS),%eax
14747 movl %eax,%ds
14748 movl %eax,%es
14749- cmpl $2,early_recursion_flag
14750- je hlt_loop
14751- incl early_recursion_flag
14752 pushl 16(%esp)
14753 pushl 24(%esp)
14754 pushl 32(%esp)
16454cff 14755@@ -622,29 +703,43 @@ ENTRY(initial_code)
58c5fc13
MT
14756 /*
14757 * BSS section
14758 */
ae4e228f 14759-__PAGE_ALIGNED_BSS
66a7e928 14760- .align PAGE_SIZE
58c5fc13 14761 #ifdef CONFIG_X86_PAE
bc901d79 14762+.section .initial_pg_pmd,"a",@progbits
16454cff 14763 initial_pg_pmd:
58c5fc13
MT
14764 .fill 1024*KPMDS,4,0
14765 #else
c52201e0 14766+.section .initial_page_table,"a",@progbits
bc901d79 14767 ENTRY(initial_page_table)
58c5fc13
MT
14768 .fill 1024,4,0
14769 #endif
bc901d79 14770+.section .initial_pg_fixmap,"a",@progbits
16454cff 14771 initial_pg_fixmap:
58c5fc13 14772 .fill 1024,4,0
bc901d79
MT
14773+.section .empty_zero_page,"a",@progbits
14774 ENTRY(empty_zero_page)
14775 .fill 4096,1,0
14776+.section .swapper_pg_dir,"a",@progbits
14777 ENTRY(swapper_pg_dir)
6892158b
MT
14778+#ifdef CONFIG_X86_PAE
14779+ .fill 4,8,0
14780+#else
14781 .fill 1024,4,0
6892158b 14782+#endif
58c5fc13 14783+
bc901d79 14784+/*
58c5fc13
MT
14785+ * The IDT has to be page-aligned to simplify the Pentium
14786+ * F0 0F bug workaround. We have a special link segment
14787+ * for this.
14788+ */
14789+.section .idt,"a",@progbits
14790+ENTRY(idt_table)
14791+ .fill 256,8,0
bc901d79
MT
14792
14793 /*
58c5fc13
MT
14794 * This starts the data section.
14795 */
14796 #ifdef CONFIG_X86_PAE
ae4e228f 14797-__PAGE_ALIGNED_DATA
58c5fc13 14798- /* Page-aligned for the benefit of paravirt? */
66a7e928 14799- .align PAGE_SIZE
bc901d79
MT
14800+.section .initial_page_table,"a",@progbits
14801 ENTRY(initial_page_table)
14802 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
58c5fc13 14803 # if KPMDS == 3
71d190be 14804@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
df50ba0c
MT
14805 # error "Kernel PMDs should be 1, 2 or 3"
14806 # endif
66a7e928 14807 .align PAGE_SIZE /* needs to be page-sized too */
df50ba0c
MT
14808+
14809+#ifdef CONFIG_PAX_PER_CPU_PGD
14810+ENTRY(cpu_pgd)
14811+ .rept NR_CPUS
14812+ .fill 4,8,0
14813+ .endr
14814+#endif
14815+
14816 #endif
58c5fc13
MT
14817
14818 .data
16454cff 14819 .balign 4
58c5fc13
MT
14820 ENTRY(stack_start)
14821- .long init_thread_union+THREAD_SIZE
14822+ .long init_thread_union+THREAD_SIZE-8
58c5fc13 14823
fe2de317
MT
14824+ready: .byte 0
14825+
58c5fc13
MT
14826+.section .rodata,"a",@progbits
14827 early_recursion_flag:
14828 .long 0
14829
71d190be
MT
14830-ready: .byte 0
14831-
14832 int_msg:
14833 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14834
16454cff 14835@@ -707,7 +811,7 @@ fault_msg:
58c5fc13
MT
14836 .word 0 # 32 bit align gdt_desc.address
14837 boot_gdt_descr:
14838 .word __BOOT_DS+7
14839- .long boot_gdt - __PAGE_OFFSET
14840+ .long pa(boot_gdt)
14841
14842 .word 0 # 32-bit align idt_desc.address
14843 idt_descr:
16454cff 14844@@ -718,7 +822,7 @@ idt_descr:
58c5fc13
MT
14845 .word 0 # 32 bit align gdt_desc.address
14846 ENTRY(early_gdt_descr)
14847 .word GDT_ENTRIES*8-1
df50ba0c 14848- .long gdt_page /* Overwritten for secondary CPUs */
58c5fc13
MT
14849+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14850
14851 /*
14852 * The boot_gdt must mirror the equivalent in setup.S and is
16454cff 14853@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
58c5fc13
MT
14854 .align L1_CACHE_BYTES
14855 ENTRY(boot_gdt)
14856 .fill GDT_ENTRY_BOOT_CS,8,0
14857- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14858- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14859+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14860+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14861+
14862+ .align PAGE_SIZE_asm
14863+ENTRY(cpu_gdt_table)
14864+ .rept NR_CPUS
14865+ .quad 0x0000000000000000 /* NULL descriptor */
14866+ .quad 0x0000000000000000 /* 0x0b reserved */
14867+ .quad 0x0000000000000000 /* 0x13 reserved */
14868+ .quad 0x0000000000000000 /* 0x1b reserved */
ae4e228f
MT
14869+
14870+#ifdef CONFIG_PAX_KERNEXEC
14871+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14872+#else
58c5fc13 14873+ .quad 0x0000000000000000 /* 0x20 unused */
ae4e228f
MT
14874+#endif
14875+
58c5fc13
MT
14876+ .quad 0x0000000000000000 /* 0x28 unused */
14877+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14878+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14879+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14880+ .quad 0x0000000000000000 /* 0x4b reserved */
14881+ .quad 0x0000000000000000 /* 0x53 reserved */
14882+ .quad 0x0000000000000000 /* 0x5b reserved */
14883+
14884+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14885+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14886+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14887+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14888+
14889+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14890+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14891+
14892+ /*
14893+ * Segments used for calling PnP BIOS have byte granularity.
14894+ * The code segments and data segments have fixed 64k limits,
14895+ * the transfer segment sizes are set at run time.
14896+ */
14897+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14898+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14899+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14900+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14901+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14902+
14903+ /*
14904+ * The APM segments have byte granularity and their bases
14905+ * are set at run time. All have 64k limits.
14906+ */
14907+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14908+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14909+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14910+
14911+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14912+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
15a11c5b 14913+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
58c5fc13
MT
14914+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14915+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14916+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14917+
14918+ /* Be sure this is zeroed to avoid false validations in Xen */
14919+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14920+ .endr
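
The per-CPU cpu_gdt_table above is written out as raw 8-byte descriptors, which are easier to read once the base, limit and access fields are unpacked. A small decoder for the .quad literals used here, illustrative only:

/* Tiny x86 segment-descriptor decoder, for reading the raw .quad values in
 * cpu_gdt_table above. Purely illustrative. */
#include <stdio.h>
#include <stdint.h>

static void decode(const char *name, uint64_t d)
{
	uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
	uint32_t base   = (uint32_t)((d >> 16) & 0xffffff) | (uint32_t)((d >> 56) << 24);
	unsigned access = (unsigned)((d >> 40) & 0xff);
	unsigned flags  = (unsigned)((d >> 52) & 0xf);

	if (flags & 0x8)                      /* G bit: limit counted in 4 KiB pages */
		limit = (limit << 12) | 0xfff;
	printf("%-18s base=%#010x limit=%#010x access=%#04x flags=%#03x\n",
	       name, base, limit, access, flags);
}

int main(void)
{
	decode("kernel 4GB code", 0x00cf9b000000ffffULL);
	decode("kernel 4GB data", 0x00cf93000000ffffULL);
	decode("user 4GB code",   0x00cffb000000ffffULL);
	decode("PnP 16-bit code", 0x00009b000000ffffULL);
	decode("ESPFIX SS",       0x00c0930000000000ULL);
	return 0;
}

Run on the entries above it shows, for example, that 0x00cf9b000000ffff is a present DPL-0 code segment with base 0 and a page-granular 4 GiB limit, while the PnP/APM entries are 64 KiB byte-granular segments whose bases are filled in at run time, as the comments in the table say.
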
fe2de317
MT
14921diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14922index e11e394..9aebc5d 100644
14923--- a/arch/x86/kernel/head_64.S
14924+++ b/arch/x86/kernel/head_64.S
14925@@ -19,6 +19,8 @@
ae4e228f
MT
14926 #include <asm/cache.h>
14927 #include <asm/processor-flags.h>
14928 #include <asm/percpu.h>
14929+#include <asm/cpufeature.h>
fe2de317 14930+#include <asm/alternative-asm.h>
ae4e228f
MT
14931
14932 #ifdef CONFIG_PARAVIRT
14933 #include <asm/asm-offsets.h>
fe2de317 14934@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
58c5fc13
MT
14935 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14936 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14937 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14938+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14939+L3_VMALLOC_START = pud_index(VMALLOC_START)
fe2de317
MT
14940+L4_VMALLOC_END = pgd_index(VMALLOC_END)
14941+L3_VMALLOC_END = pud_index(VMALLOC_END)
58c5fc13
MT
14942+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14943+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14944
14945 .text
ae4e228f 14946 __HEAD
fe2de317 14947@@ -85,35 +93,23 @@ startup_64:
58c5fc13
MT
14948 */
14949 addq %rbp, init_level4_pgt + 0(%rip)
14950 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14951+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
fe2de317 14952+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
58c5fc13
MT
14953+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14954 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14955
14956 addq %rbp, level3_ident_pgt + 0(%rip)
ae4e228f 14957+#ifndef CONFIG_XEN
58c5fc13 14958+ addq %rbp, level3_ident_pgt + 8(%rip)
ae4e228f 14959+#endif
58c5fc13
MT
14960
14961- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14962- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14963+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
fe2de317 14964+
58c5fc13
MT
14965+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14966+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14967
fe2de317
MT
14968 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14969-
58c5fc13
MT
14970- /* Add an Identity mapping if I am above 1G */
14971- leaq _text(%rip), %rdi
14972- andq $PMD_PAGE_MASK, %rdi
14973-
14974- movq %rdi, %rax
14975- shrq $PUD_SHIFT, %rax
14976- andq $(PTRS_PER_PUD - 1), %rax
14977- jz ident_complete
14978-
14979- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14980- leaq level3_ident_pgt(%rip), %rbx
14981- movq %rdx, 0(%rbx, %rax, 8)
14982-
14983- movq %rdi, %rax
14984- shrq $PMD_SHIFT, %rax
14985- andq $(PTRS_PER_PMD - 1), %rax
14986- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14987- leaq level2_spare_pgt(%rip), %rbx
14988- movq %rdx, 0(%rbx, %rax, 8)
14989-ident_complete:
58c5fc13
MT
14990+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14991
14992 /*
14993 * Fixup the kernel text+data virtual addresses. Note that
fe2de317 14994@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
df50ba0c
MT
14995 * after the boot processor executes this code.
14996 */
14997
14998- /* Enable PAE mode and PGE */
14999- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15000+ /* Enable PAE mode and PSE/PGE */
15001+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15002 movq %rax, %cr4
15003
15004 /* Setup early boot stage 4 level pagetables. */
fe2de317 15005@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
ae4e228f
MT
15006 movl $MSR_EFER, %ecx
15007 rdmsr
15008 btsl $_EFER_SCE, %eax /* Enable System Call */
15009- btl $20,%edi /* No Execute supported? */
15010+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
58c5fc13
MT
15011 jnc 1f
15012 btsl $_EFER_NX, %eax
15013+ leaq init_level4_pgt(%rip), %rdi
fe2de317 15014+#ifndef CONFIG_EFI
58c5fc13 15015+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
fe2de317 15016+#endif
58c5fc13 15017+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
fe2de317 15018+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
58c5fc13 15019+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
ae4e228f 15020+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
58c5fc13
MT
15021 1: wrmsr /* Make changes effective */
15022
15023 /* Setup cr0 */
fe2de317
MT
15024@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15025 * jump. In addition we need to ensure %cs is set so we make this
15026 * a far return.
15027 */
15028+ pax_set_fptr_mask
15029 movq initial_code(%rip),%rax
15030 pushq $0 # fake return address to stop unwinder
15031 pushq $__KERNEL_CS # set correct cs
15032@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
58c5fc13
MT
15033 bad_address:
15034 jmp bad_address
15035
15036- .section ".init.text","ax"
15037+ __INIT
15038 #ifdef CONFIG_EARLY_PRINTK
15039 .globl early_idt_handlers
15040 early_idt_handlers:
fe2de317 15041@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
58c5fc13
MT
15042 #endif /* EARLY_PRINTK */
15043 1: hlt
15044 jmp 1b
15045+ .previous
15046
15047 #ifdef CONFIG_EARLY_PRINTK
15048+ __INITDATA
15049 early_recursion_flag:
15050 .long 0
15051+ .previous
15052
15053+ .section .rodata,"a",@progbits
15054 early_idt_msg:
15055 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15056 early_idt_ripmsg:
15057 .asciz "RIP %s\n"
fe2de317
MT
15058+ .previous
15059 #endif /* CONFIG_EARLY_PRINTK */
15060- .previous
58c5fc13
MT
15061
15062+ .section .rodata,"a",@progbits
15063 #define NEXT_PAGE(name) \
15064 .balign PAGE_SIZE; \
15065 ENTRY(name)
fe2de317 15066@@ -338,7 +348,6 @@ ENTRY(name)
bc901d79
MT
15067 i = i + 1 ; \
15068 .endr
15069
15070- .data
15071 /*
15072 * This default setting generates an ident mapping at address 0x100000
15073 * and a mapping for the kernel that precisely maps virtual address
fe2de317 15074@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
58c5fc13
MT
15075 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15076 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15077 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15078+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
fe2de317
MT
15079+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15080+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
15081+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
58c5fc13
MT
15082+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15083+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15084 .org init_level4_pgt + L4_START_KERNEL*8, 0
15085 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15086 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15087
df50ba0c
MT
15088+#ifdef CONFIG_PAX_PER_CPU_PGD
15089+NEXT_PAGE(cpu_pgd)
15090+ .rept NR_CPUS
15091+ .fill 512,8,0
15092+ .endr
15093+#endif
15094+
58c5fc13
MT
15095 NEXT_PAGE(level3_ident_pgt)
15096 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15097+#ifdef CONFIG_XEN
15098 .fill 511,8,0
15099+#else
15100+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
ae4e228f 15101+ .fill 510,8,0
58c5fc13
MT
15102+#endif
15103+
fe2de317
MT
15104+NEXT_PAGE(level3_vmalloc_start_pgt)
15105+ .fill 512,8,0
15106+
15107+NEXT_PAGE(level3_vmalloc_end_pgt)
58c5fc13
MT
15108+ .fill 512,8,0
15109+
15110+NEXT_PAGE(level3_vmemmap_pgt)
15111+ .fill L3_VMEMMAP_START,8,0
15112+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15113
15114 NEXT_PAGE(level3_kernel_pgt)
15115 .fill L3_START_KERNEL,8,0
fe2de317 15116@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
58c5fc13
MT
15117 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15118 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15119
15120+NEXT_PAGE(level2_vmemmap_pgt)
15121+ .fill 512,8,0
15122+
15123 NEXT_PAGE(level2_fixmap_pgt)
15124- .fill 506,8,0
15125- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15126- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15127- .fill 5,8,0
15128+ .fill 507,8,0
15129+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15130+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15131+ .fill 4,8,0
15132
15133-NEXT_PAGE(level1_fixmap_pgt)
15134+NEXT_PAGE(level1_vsyscall_pgt)
15135 .fill 512,8,0
15136
15137-NEXT_PAGE(level2_ident_pgt)
15138- /* Since I easily can, map the first 1G.
ae4e228f 15139+ /* Since I easily can, map the first 2G.
58c5fc13
MT
15140 * Don't set NX because code runs from these pages.
15141 */
15142- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15143+NEXT_PAGE(level2_ident_pgt)
ae4e228f 15144+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
58c5fc13
MT
15145
15146 NEXT_PAGE(level2_kernel_pgt)
15147 /*
fe2de317 15148@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
58c5fc13
MT
15149 * If you want to increase this then increase MODULES_VADDR
15150 * too.)
15151 */
15152- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15153- KERNEL_IMAGE_SIZE/PMD_SIZE)
15154-
15155-NEXT_PAGE(level2_spare_pgt)
15156- .fill 512, 8, 0
15157+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15158
15159 #undef PMDS
15160 #undef NEXT_PAGE
15161
15162- .data
15163+ .align PAGE_SIZE
15164+ENTRY(cpu_gdt_table)
15165+ .rept NR_CPUS
15166+ .quad 0x0000000000000000 /* NULL descriptor */
15167+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15168+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15169+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15170+ .quad 0x00cffb000000ffff /* __USER32_CS */
15171+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15172+ .quad 0x00affb000000ffff /* __USER_CS */
ae4e228f
MT
15173+
15174+#ifdef CONFIG_PAX_KERNEXEC
15175+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15176+#else
58c5fc13 15177+ .quad 0x0 /* unused */
ae4e228f
MT
15178+#endif
15179+
58c5fc13
MT
15180+ .quad 0,0 /* TSS */
15181+ .quad 0,0 /* LDT */
15182+ .quad 0,0,0 /* three TLS descriptors */
15183+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15184+ /* asm/segment.h:GDT_ENTRIES must match this */
15185+
15186+ /* zero the remaining page */
15187+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15188+ .endr
15189+
15190 .align 16
15191 .globl early_gdt_descr
15192 early_gdt_descr:
15193 .word GDT_ENTRIES*8-1
15194 early_gdt_descr_base:
15195- .quad INIT_PER_CPU_VAR(gdt_page)
15196+ .quad cpu_gdt_table
15197
15198 ENTRY(phys_base)
15199 /* This must match the first entry in level2_kernel_pgt */
15200 .quad 0x0000000000000000
15201
15202 #include "../../x86/xen/xen-head.S"
15203-
15204- .section .bss, "aw", @nobits
15205+
15206+ .section .rodata,"a",@progbits
15207 .align L1_CACHE_BYTES
15208 ENTRY(idt_table)
15209- .skip IDT_ENTRIES * 16
15210+ .fill 512,8,0
15211
ae4e228f 15212 __PAGE_ALIGNED_BSS
58c5fc13 15213 .align PAGE_SIZE
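
head_64.S now pre-wires separate top-level entries for the vmalloc and vmemmap regions and, once EFER.NX is enabled, sets _PAGE_BIT_NX on every pre-built PGD slot except the __START_KERNEL_map one, so only the kernel-text mapping stays executable. The new L4_* symbols are just pgd_index() of each region's start; with the canonical layout of this kernel generation (addresses assumed below) the indices work out as follows:

/* Sketch: pgd_index() of the regions wired up in init_level4_pgt above.
 * The region start addresses are the conventional x86_64 values for this
 * kernel generation and are assumed here, not extracted from this tree. */
#include <stdio.h>
#include <stdint.h>

#define PGDIR_SHIFT  39
#define PTRS_PER_PGD 512
#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	const struct { const char *name; uint64_t addr; } r[] = {
		{ "__PAGE_OFFSET (direct map)", 0xffff880000000000ULL },
		{ "VMALLOC_START",              0xffffc90000000000ULL },
		{ "VMEMMAP_START",              0xffffea0000000000ULL },
		{ "__START_KERNEL_map",         0xffffffff80000000ULL },
	};
	for (unsigned i = 0; i < sizeof(r) / sizeof(r[0]); i++)
		printf("%-28s pgd index %llu\n", r[i].name,
		       (unsigned long long)pgd_index(r[i].addr));
	return 0;
}
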
fe2de317
MT
15214diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15215index 9c3bd4a..e1d9b35 100644
15216--- a/arch/x86/kernel/i386_ksyms_32.c
15217+++ b/arch/x86/kernel/i386_ksyms_32.c
ae4e228f
MT
15218@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15219 EXPORT_SYMBOL(cmpxchg8b_emu);
58c5fc13
MT
15220 #endif
15221
15222+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15223+
15224 /* Networking helper routines. */
15225 EXPORT_SYMBOL(csum_partial_copy_generic);
15226+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15227+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15228
15229 EXPORT_SYMBOL(__get_user_1);
15230 EXPORT_SYMBOL(__get_user_2);
ae4e228f 15231@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
58c5fc13
MT
15232
15233 EXPORT_SYMBOL(csum_partial);
15234 EXPORT_SYMBOL(empty_zero_page);
15235+
15236+#ifdef CONFIG_PAX_KERNEXEC
15237+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15238+#endif
fe2de317
MT
15239diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15240index 6104852..6114160 100644
15241--- a/arch/x86/kernel/i8259.c
15242+++ b/arch/x86/kernel/i8259.c
8308f9c9
MT
15243@@ -210,7 +210,7 @@ spurious_8259A_irq:
15244 "spurious 8259A interrupt: IRQ%d.\n", irq);
15245 spurious_irq_mask |= irqmask;
15246 }
15247- atomic_inc(&irq_err_count);
15248+ atomic_inc_unchecked(&irq_err_count);
15249 /*
15250 * Theoretically we do not have to handle this IRQ,
15251 * but in Linux this does not cause problems and is
fe2de317
MT
15252diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15253index 43e9ccf..44ccf6f 100644
15254--- a/arch/x86/kernel/init_task.c
15255+++ b/arch/x86/kernel/init_task.c
15256@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
71d190be
MT
15257 * way process stacks are handled. This is done by having a special
15258 * "init_task" linker map entry..
15259 */
15260-union thread_union init_thread_union __init_task_data =
15261- { INIT_THREAD_INFO(init_task) };
15262+union thread_union init_thread_union __init_task_data;
15263
15264 /*
15265 * Initial task structure.
15266@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
58c5fc13
MT
15267 * section. Since TSS's are completely CPU-local, we want them
15268 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15269 */
15270-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15271-
15272+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15273+EXPORT_SYMBOL(init_tss);
fe2de317
MT
15274diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15275index 8c96897..be66bfa 100644
15276--- a/arch/x86/kernel/ioport.c
15277+++ b/arch/x86/kernel/ioport.c
58c5fc13
MT
15278@@ -6,6 +6,7 @@
15279 #include <linux/sched.h>
15280 #include <linux/kernel.h>
15281 #include <linux/capability.h>
15282+#include <linux/security.h>
15283 #include <linux/errno.h>
15284 #include <linux/types.h>
15285 #include <linux/ioport.h>
fe2de317 15286@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
58c5fc13
MT
15287
15288 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15289 return -EINVAL;
15290+#ifdef CONFIG_GRKERNSEC_IO
df50ba0c 15291+ if (turn_on && grsec_disable_privio) {
58c5fc13
MT
15292+ gr_handle_ioperm();
15293+ return -EPERM;
15294+ }
15295+#endif
15296 if (turn_on && !capable(CAP_SYS_RAWIO))
15297 return -EPERM;
15298
fe2de317 15299@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
58c5fc13
MT
15300 * because the ->io_bitmap_max value must match the bitmap
15301 * contents:
15302 */
15303- tss = &per_cpu(init_tss, get_cpu());
15304+ tss = init_tss + get_cpu();
15305
66a7e928
MT
15306 if (turn_on)
15307 bitmap_clear(t->io_bitmap_ptr, from, num);
fe2de317 15308@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
58c5fc13
MT
15309 return -EINVAL;
15310 /* Trying to gain more privileges? */
15311 if (level > old) {
15312+#ifdef CONFIG_GRKERNSEC_IO
df50ba0c
MT
15313+ if (grsec_disable_privio) {
15314+ gr_handle_iopl();
15315+ return -EPERM;
15316+ }
15317+#endif
58c5fc13
MT
15318 if (!capable(CAP_SYS_RAWIO))
15319 return -EPERM;
58c5fc13 15320 }
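
With CONFIG_GRKERNSEC_IO and the privio restriction active, sys_ioperm() and sys_iopl() refuse to raise I/O privileges before the usual CAP_SYS_RAWIO check is even reached, logging the attempt through gr_handle_ioperm()/gr_handle_iopl(). From userspace the only visible difference is an extra source of EPERM; a minimal probe (on such a kernel it prints the refusal even when run as root):

/* Minimal probe for the ioperm() policy above; purely illustrative. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/io.h>

int main(void)
{
	if (ioperm(0x80, 1, 1) == 0) {
		printf("ioperm granted access to port 0x80\n");
		ioperm(0x80, 1, 0);              /* drop it again */
	} else {
		printf("ioperm refused: %s\n", strerror(errno));
	}
	return 0;
}
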
fe2de317
MT
15321diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15322index 6c0802e..bea25ae 100644
15323--- a/arch/x86/kernel/irq.c
15324+++ b/arch/x86/kernel/irq.c
15325@@ -17,7 +17,7 @@
15326 #include <asm/mce.h>
15327 #include <asm/hw_irq.h>
15328
15329-atomic_t irq_err_count;
15330+atomic_unchecked_t irq_err_count;
15331
15332 /* Function pointer for generic interrupt vector handling */
15333 void (*x86_platform_ipi_callback)(void) = NULL;
15334@@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15335 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15336 seq_printf(p, " Machine check polls\n");
15337 #endif
15338- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15339+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15340 #if defined(CONFIG_X86_IO_APIC)
15341- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15342+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15343 #endif
15344 return 0;
15345 }
15346@@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15347
15348 u64 arch_irq_stat(void)
15349 {
15350- u64 sum = atomic_read(&irq_err_count);
15351+ u64 sum = atomic_read_unchecked(&irq_err_count);
15352
15353 #ifdef CONFIG_X86_IO_APIC
15354- sum += atomic_read(&irq_mis_count);
15355+ sum += atomic_read_unchecked(&irq_mis_count);
15356 #endif
15357 return sum;
15358 }
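
Converting irq_err_count (and the irq_mis_count readers) to the *_unchecked atomic API is part of PaX's reference-count overflow protection: ordinary atomic_t operations are instrumented to detect overflow, so plain statistics counters that may legitimately wrap are switched to the unchecked variants, which keep the traditional semantics. A stand-in sketch of the calling convention as used here (not the actual PaX implementation):

/* Shape of the *_unchecked counter API as used above; a stand-in sketch,
 * not the real PaX/grsecurity implementation. */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);    /* may wrap; no overflow trap */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}

int main(void)
{
	atomic_unchecked_t irq_err_count = { 0 };

	atomic_inc_unchecked(&irq_err_count);
	printf("ERR: %d\n", atomic_read_unchecked(&irq_err_count));
	return 0;
}
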
15359diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15360index 7209070..cbcd71a 100644
15361--- a/arch/x86/kernel/irq_32.c
15362+++ b/arch/x86/kernel/irq_32.c
71d190be
MT
15363@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15364 __asm__ __volatile__("andl %%esp,%0" :
15365 "=r" (sp) : "0" (THREAD_SIZE - 1));
15366
15367- return sp < (sizeof(struct thread_info) + STACK_WARN);
15368+ return sp < STACK_WARN;
15369 }
15370
15371 static void print_stack_overflow(void)
fe2de317 15372@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
71d190be
MT
15373 * per-CPU IRQ handling contexts (thread information and stack)
15374 */
15375 union irq_ctx {
15376- struct thread_info tinfo;
15377- u32 stack[THREAD_SIZE/sizeof(u32)];
15378+ unsigned long previous_esp;
15379+ u32 stack[THREAD_SIZE/sizeof(u32)];
15380 } __attribute__((aligned(THREAD_SIZE)));
15381
15382 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
fe2de317 15383@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
71d190be
MT
15384 static inline int
15385 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15386 {
15387- union irq_ctx *curctx, *irqctx;
15388+ union irq_ctx *irqctx;
15389 u32 *isp, arg1, arg2;
15390
15391- curctx = (union irq_ctx *) current_thread_info();
15392 irqctx = __this_cpu_read(hardirq_ctx);
15393
15394 /*
fe2de317 15395@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
71d190be
MT
15396 * handler) we can't do that and just have to keep using the
15397 * current stack (which is the irq stack already after all)
15398 */
15399- if (unlikely(curctx == irqctx))
15400+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
58c5fc13
MT
15401 return 0;
15402
15403 /* build the stack frame on the IRQ stack */
15404- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
71d190be
MT
15405- irqctx->tinfo.task = curctx->tinfo.task;
15406- irqctx->tinfo.previous_esp = current_stack_pointer;
58c5fc13 15407+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
71d190be 15408+ irqctx->previous_esp = current_stack_pointer;
bc901d79 15409
71d190be
MT
15410- /*
15411- * Copy the softirq bits in preempt_count so that the
15412- * softirq checks work in the hardirq context.
15413- */
15414- irqctx->tinfo.preempt_count =
15415- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15416- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
bc901d79 15417+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 15418+ __set_fs(MAKE_MM_SEG(0));
bc901d79 15419+#endif
71d190be 15420
bc901d79
MT
15421 if (unlikely(overflow))
15422 call_on_stack(print_stack_overflow, isp);
fe2de317 15423@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
bc901d79
MT
15424 : "0" (irq), "1" (desc), "2" (isp),
15425 "D" (desc->handle_irq)
15426 : "memory", "cc", "ecx");
15427+
15428+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 15429+ __set_fs(current_thread_info()->addr_limit);
bc901d79
MT
15430+#endif
15431+
15432 return 1;
15433 }
15434
fe2de317 15435@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
71d190be
MT
15436 */
15437 void __cpuinit irq_ctx_init(int cpu)
15438 {
15439- union irq_ctx *irqctx;
15440-
15441 if (per_cpu(hardirq_ctx, cpu))
15442 return;
15443
15444- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15445- THREAD_FLAGS,
15446- THREAD_ORDER));
15447- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15448- irqctx->tinfo.cpu = cpu;
15449- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15450- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15451-
15452- per_cpu(hardirq_ctx, cpu) = irqctx;
15453-
15454- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15455- THREAD_FLAGS,
15456- THREAD_ORDER));
15457- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15458- irqctx->tinfo.cpu = cpu;
15459- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15460-
15461- per_cpu(softirq_ctx, cpu) = irqctx;
15462+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15463+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15464
15465 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15466 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15a11c5b 15467@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
71d190be
MT
15468 asmlinkage void do_softirq(void)
15469 {
15470 unsigned long flags;
15471- struct thread_info *curctx;
15472 union irq_ctx *irqctx;
15473 u32 *isp;
15474
15a11c5b 15475@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
71d190be
MT
15476 local_irq_save(flags);
15477
15478 if (local_softirq_pending()) {
15479- curctx = current_thread_info();
15480 irqctx = __this_cpu_read(softirq_ctx);
15481- irqctx->tinfo.task = curctx->task;
15482- irqctx->tinfo.previous_esp = current_stack_pointer;
15483+ irqctx->previous_esp = current_stack_pointer;
58c5fc13
MT
15484
15485 /* build the stack frame on the softirq stack */
15486- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15487+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
bc901d79
MT
15488+
15489+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 15490+ __set_fs(MAKE_MM_SEG(0));
bc901d79 15491+#endif
58c5fc13
MT
15492
15493 call_on_stack(__do_softirq, isp);
bc901d79
MT
15494+
15495+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 15496+ __set_fs(current_thread_info()->addr_limit);
bc901d79
MT
15497+#endif
15498+
58c5fc13 15499 /*
66a7e928 15500 * Shouldn't happen, we returned above if in_interrupt():
bc901d79 15501 */
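
The reworked irq_32.c no longer keeps a thread_info copy at the bottom of each IRQ stack; union irq_ctx now just records previous_esp, and the "already running on this IRQ stack?" test becomes a simple range check, which works because both the task stack and the IRQ stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned. The arithmetic, as a small userspace sketch (THREAD_SIZE value assumed):

/* Sketch of the "already on the IRQ stack?" test used above: with
 * THREAD_SIZE-aligned stacks a pointer is on a given stack iff it falls
 * within THREAD_SIZE of the stack's base. Illustrative values only. */
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL                       /* assumed: 2 pages */

static int on_stack(uintptr_t sp, uintptr_t stack_base)
{
	return sp - stack_base < THREAD_SIZE;    /* unsigned compare */
}

int main(void)
{
	uintptr_t irqctx = 0xc1000000;           /* pretend IRQ stack base */

	printf("%d\n", on_stack(0xc1001f80, irqctx));   /* 1: inside      */
	printf("%d\n", on_stack(0xc0fffff0, irqctx));   /* 0: just below  */
	printf("%d\n", on_stack(0xc1002000, irqctx));   /* 0: next stack  */
	return 0;
}
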
fe2de317
MT
15502diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15503index 00354d4..187ae44 100644
15504--- a/arch/x86/kernel/kgdb.c
15505+++ b/arch/x86/kernel/kgdb.c
15506@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
6892158b 15507 #ifdef CONFIG_X86_32
66a7e928 15508 switch (regno) {
6892158b
MT
15509 case GDB_SS:
15510- if (!user_mode_vm(regs))
15511+ if (!user_mode(regs))
15512 *(unsigned long *)mem = __KERNEL_DS;
15513 break;
15514 case GDB_SP:
15515- if (!user_mode_vm(regs))
15516+ if (!user_mode(regs))
15517 *(unsigned long *)mem = kernel_stack_pointer(regs);
15518 break;
15519 case GDB_GS:
fe2de317 15520@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
8308f9c9
MT
15521 case 'k':
15522 /* clear the trace bit */
15523 linux_regs->flags &= ~X86_EFLAGS_TF;
15524- atomic_set(&kgdb_cpu_doing_single_step, -1);
15525+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15526
15527 /* set the trace bit if we're stepping */
15528 if (remcomInBuffer[0] == 's') {
15529 linux_regs->flags |= X86_EFLAGS_TF;
15530- atomic_set(&kgdb_cpu_doing_single_step,
15531+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15532 raw_smp_processor_id());
15533 }
15534
fe2de317 15535@@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
66a7e928 15536 return NOTIFY_DONE;
8308f9c9
MT
15537
15538 case DIE_DEBUG:
15539- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15540+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15541 if (user_mode(regs))
15542 return single_step_cont(regs, args);
15543 break;
fe2de317
MT
15544diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15545index 794bc95..c6e29e9 100644
15546--- a/arch/x86/kernel/kprobes.c
15547+++ b/arch/x86/kernel/kprobes.c
15548@@ -117,8 +117,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
df50ba0c
MT
15549 } __attribute__((packed)) *insn;
15550
bc901d79 15551 insn = (struct __arch_relative_insn *)from;
58c5fc13 15552+
ae4e228f 15553+ pax_open_kernel();
df50ba0c
MT
15554 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15555 insn->op = op;
ae4e228f 15556+ pax_close_kernel();
58c5fc13
MT
15557 }
15558
df50ba0c 15559 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
fe2de317 15560@@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
bc901d79
MT
15561 kprobe_opcode_t opcode;
15562 kprobe_opcode_t *orig_opcodes = opcodes;
15563
15564- if (search_exception_tables((unsigned long)opcodes))
15565+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15566 return 0; /* Page fault may occur on this address. */
15567
15568 retry:
fe2de317 15569@@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
df50ba0c
MT
15570 }
15571 }
15572 insn_get_length(&insn);
ae4e228f 15573+ pax_open_kernel();
df50ba0c 15574 memcpy(dest, insn.kaddr, insn.length);
ae4e228f 15575+ pax_close_kernel();
58c5fc13 15576
df50ba0c
MT
15577 #ifdef CONFIG_X86_64
15578 if (insn_rip_relative(&insn)) {
fe2de317 15579@@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
df50ba0c
MT
15580 (u8 *) dest;
15581 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15582 disp = (u8 *) dest + insn_offset_displacement(&insn);
15583+ pax_open_kernel();
15584 *(s32 *) disp = (s32) newdisp;
15585+ pax_close_kernel();
15586 }
15587 #endif
15588 return insn.length;
fe2de317 15589@@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
df50ba0c
MT
15590 */
15591 __copy_instruction(p->ainsn.insn, p->addr, 0);
58c5fc13
MT
15592
15593- if (can_boost(p->addr))
15594+ if (can_boost(ktla_ktva(p->addr)))
15595 p->ainsn.boostable = 0;
15596 else
15597 p->ainsn.boostable = -1;
15598
15599- p->opcode = *p->addr;
15600+ p->opcode = *(ktla_ktva(p->addr));
15601 }
15602
15603 int __kprobes arch_prepare_kprobe(struct kprobe *p)
fe2de317 15604@@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
df50ba0c
MT
15605 * nor set current_kprobe, because it doesn't use single
15606 * stepping.
15607 */
15608- regs->ip = (unsigned long)p->ainsn.insn;
15609+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15610 preempt_enable_no_resched();
15611 return;
15612 }
fe2de317 15613@@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
58c5fc13
MT
15614 if (p->opcode == BREAKPOINT_INSTRUCTION)
15615 regs->ip = (unsigned long)p->addr;
15616 else
15617- regs->ip = (unsigned long)p->ainsn.insn;
15618+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15619 }
15620
df50ba0c 15621 /*
fe2de317 15622@@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
df50ba0c
MT
15623 setup_singlestep(p, regs, kcb, 0);
15624 return 1;
15625 }
15626- } else if (*addr != BREAKPOINT_INSTRUCTION) {
15627+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
58c5fc13
MT
15628 /*
15629 * The breakpoint instruction was removed right
15630 * after we hit it. Another cpu has removed
fe2de317 15631@@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
6e9df6a3
MT
15632 " movq %rax, 152(%rsp)\n"
15633 RESTORE_REGS_STRING
15634 " popfq\n"
15635+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15636+ " btsq $63,(%rsp)\n"
15637+#endif
15638 #else
15639 " pushf\n"
15640 SAVE_REGS_STRING
fe2de317 15641@@ -819,7 +829,7 @@ static void __kprobes resume_execution(struct kprobe *p,
58c5fc13
MT
15642 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15643 {
15644 unsigned long *tos = stack_addr(regs);
15645- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15646+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15647 unsigned long orig_ip = (unsigned long)p->addr;
15648 kprobe_opcode_t *insn = p->ainsn.insn;
15649
fe2de317 15650@@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
58c5fc13
MT
15651 struct die_args *args = data;
15652 int ret = NOTIFY_DONE;
15653
15654- if (args->regs && user_mode_vm(args->regs))
15655+ if (args->regs && user_mode(args->regs))
15656 return ret;
15657
15658 switch (val) {
fe2de317 15659@@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
bc901d79
MT
15660 * Verify if the address gap is in 2GB range, because this uses
15661 * a relative jump.
15662 */
15663- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15664+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15665 if (abs(rel) > 0x7fffffff)
15666 return -ERANGE;
15667
fe2de317 15668@@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
bc901d79
MT
15669 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15670
15671 /* Set probe function call */
15672- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15673+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15674
15675 /* Set returning jmp instruction at the tail of out-of-line buffer */
15676 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15677- (u8 *)op->kp.addr + op->optinsn.size);
15678+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15679
15680 flush_icache_range((unsigned long) buf,
15681 (unsigned long) buf + TMPL_END_IDX +
fe2de317 15682@@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
bc901d79
MT
15683 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15684
15685 /* Backup instructions which will be replaced by jump address */
15686- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15687+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15688 RELATIVE_ADDR_SIZE);
15689
16454cff 15690 insn_buf[0] = RELATIVEJUMP_OPCODE;
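
Most of the kprobes changes follow two patterns: writes into instruction slots are bracketed by pax_open_kernel()/pax_close_kernel() because kernel text is no longer writable, and addresses are passed through ktla_ktva()/ktva_ktla() because, with KERNEXEC on i386, the same text byte is reachable at two virtual addresses (the flat data view and the view through the rebased code segment), separated by a constant. A purely conceptual sketch of that round trip; the delta below is a placeholder, not the real kernel constant, and even its sign depends on the configuration:

/* Conceptual sketch of the ktla_ktva()/ktva_ktla() pair used above.
 * KTEXT_DELTA is a made-up placeholder value. */
#include <stdio.h>

#define KTEXT_DELTA 0x01000000UL                 /* placeholder, assumed */

static unsigned long ktla_ktva(unsigned long addr) { return addr + KTEXT_DELTA; }
static unsigned long ktva_ktla(unsigned long addr) { return addr - KTEXT_DELTA; }

int main(void)
{
	unsigned long linear = 0xc1234560UL;     /* pretend kernel-text address */

	printf("code view:  %#lx\n", ktla_ktva(linear));
	printf("round trip: %#lx\n", ktva_ktla(ktla_ktva(linear)));
	return 0;
}
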
fe2de317
MT
15691diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15692index a9c2116..a52d4fc 100644
15693--- a/arch/x86/kernel/kvm.c
15694+++ b/arch/x86/kernel/kvm.c
15695@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15a11c5b
MT
15696 pv_mmu_ops.set_pud = kvm_set_pud;
15697 #if PAGETABLE_LEVELS == 4
15698 pv_mmu_ops.set_pgd = kvm_set_pgd;
15699+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15700 #endif
15701 #endif
15702 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
fe2de317
MT
15703diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15704index ea69726..604d066 100644
15705--- a/arch/x86/kernel/ldt.c
15706+++ b/arch/x86/kernel/ldt.c
15707@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
58c5fc13
MT
15708 if (reload) {
15709 #ifdef CONFIG_SMP
15710 preempt_disable();
15711- load_LDT(pc);
15712+ load_LDT_nolock(pc);
ae4e228f
MT
15713 if (!cpumask_equal(mm_cpumask(current->mm),
15714 cpumask_of(smp_processor_id())))
58c5fc13
MT
15715 smp_call_function(flush_ldt, current->mm, 1);
15716 preempt_enable();
15717 #else
15718- load_LDT(pc);
15719+ load_LDT_nolock(pc);
15720 #endif
15721 }
15722 if (oldsize) {
fe2de317 15723@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
58c5fc13
MT
15724 return err;
15725
15726 for (i = 0; i < old->size; i++)
15727- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15728+ write_ldt_entry(new->ldt, i, old->ldt + i);
15729 return 0;
15730 }
15731
fe2de317 15732@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
58c5fc13
MT
15733 retval = copy_ldt(&mm->context, &old_mm->context);
15734 mutex_unlock(&old_mm->context.lock);
15735 }
15736+
15737+ if (tsk == current) {
6892158b 15738+ mm->context.vdso = 0;
58c5fc13
MT
15739+
15740+#ifdef CONFIG_X86_32
15741+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15742+ mm->context.user_cs_base = 0UL;
15743+ mm->context.user_cs_limit = ~0UL;
15744+
15745+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15746+ cpus_clear(mm->context.cpu_user_cs_mask);
15747+#endif
15748+
15749+#endif
15750+#endif
15751+
15752+ }
15753+
15754 return retval;
15755 }
15756
fe2de317 15757@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
58c5fc13
MT
15758 }
15759 }
15760
15761+#ifdef CONFIG_PAX_SEGMEXEC
15762+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15763+ error = -EINVAL;
15764+ goto out_unlock;
15765+ }
15766+#endif
15767+
15768 fill_ldt(&ldt, &ldt_info);
15769 if (oldmode)
15770 ldt.avl = 0;
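
The SEGMEXEC hunk makes write_ldt() reject any descriptor whose contents field requests a code segment: a task running under SEGMEXEC depends on the code/data segment split and must not be able to install executable segments of its own. From userspace the rejection is an EINVAL from modify_ldt(); a minimal probe (the descriptor field values are just typical choices):

/* Minimal modify_ldt() probe for the SEGMEXEC check above: installing a
 * code LDT entry is expected to fail with EINVAL for a SEGMEXEC task.
 * Illustrative only. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>                     /* struct user_desc, MODIFY_LDT_* */

int main(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number   = 0;
	d.base_addr      = 0;
	d.limit          = 0xfffff;
	d.seg_32bit      = 1;
	d.limit_in_pages = 1;
	d.contents       = MODIFY_LDT_CONTENTS_CODE;

	if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) < 0)
		printf("modify_ldt refused: %s\n", strerror(errno));
	else
		printf("code LDT entry installed\n");
	return 0;
}
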
fe2de317
MT
15771diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15772index a3fa43b..8966f4c 100644
15773--- a/arch/x86/kernel/machine_kexec_32.c
15774+++ b/arch/x86/kernel/machine_kexec_32.c
ae4e228f 15775@@ -27,7 +27,7 @@
58c5fc13 15776 #include <asm/cacheflush.h>
ae4e228f 15777 #include <asm/debugreg.h>
58c5fc13
MT
15778
15779-static void set_idt(void *newidt, __u16 limit)
15780+static void set_idt(struct desc_struct *newidt, __u16 limit)
15781 {
15782 struct desc_ptr curidt;
15783
fe2de317 15784@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
58c5fc13
MT
15785 }
15786
15787
15788-static void set_gdt(void *newgdt, __u16 limit)
15789+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15790 {
15791 struct desc_ptr curgdt;
15792
15793@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15794 }
15795
15796 control_page = page_address(image->control_code_page);
15797- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15798+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15799
15800 relocate_kernel_ptr = control_page;
15801 page_list[PA_CONTROL_PAGE] = __pa(control_page);
fe2de317
MT
15802diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15803index 1a1b606..5c89b55 100644
15804--- a/arch/x86/kernel/microcode_intel.c
15805+++ b/arch/x86/kernel/microcode_intel.c
15806@@ -440,13 +440,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
ae4e228f
MT
15807
15808 static int get_ucode_user(void *to, const void *from, size_t n)
15809 {
15810- return copy_from_user(to, from, n);
6e9df6a3 15811+ return copy_from_user(to, (const void __force_user *)from, n);
ae4e228f
MT
15812 }
15813
15814 static enum ucode_state
15815 request_microcode_user(int cpu, const void __user *buf, size_t size)
15816 {
15817- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
6e9df6a3 15818+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
ae4e228f
MT
15819 }
15820
15821 static void microcode_fini_cpu(int cpu)
fe2de317
MT
15822diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15823index 925179f..85bec6c 100644
15824--- a/arch/x86/kernel/module.c
15825+++ b/arch/x86/kernel/module.c
6e9df6a3 15826@@ -36,15 +36,60 @@
58c5fc13
MT
15827 #define DEBUGP(fmt...)
15828 #endif
15829
15830-void *module_alloc(unsigned long size)
16454cff 15831+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
58c5fc13 15832 {
16454cff 15833 if (PAGE_ALIGN(size) > MODULES_LEN)
58c5fc13 15834 return NULL;
16454cff
MT
15835 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15836- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15837+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15838 -1, __builtin_return_address(0));
15839 }
58c5fc13 15840
58c5fc13
MT
15841+void *module_alloc(unsigned long size)
15842+{
ae4e228f
MT
15843+
15844+#ifdef CONFIG_PAX_KERNEXEC
58c5fc13 15845+ return __module_alloc(size, PAGE_KERNEL);
ae4e228f
MT
15846+#else
15847+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15848+#endif
15849+
16454cff
MT
15850+}
15851+
ae4e228f
MT
15852+#ifdef CONFIG_PAX_KERNEXEC
15853+#ifdef CONFIG_X86_32
58c5fc13
MT
15854+void *module_alloc_exec(unsigned long size)
15855+{
15856+ struct vm_struct *area;
15857+
15858+ if (size == 0)
15859+ return NULL;
15860+
15861+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
ae4e228f 15862+ return area ? area->addr : NULL;
58c5fc13
MT
15863+}
15864+EXPORT_SYMBOL(module_alloc_exec);
15865+
15866+void module_free_exec(struct module *mod, void *module_region)
15867+{
ae4e228f 15868+ vunmap(module_region);
58c5fc13
MT
15869+}
15870+EXPORT_SYMBOL(module_free_exec);
15871+#else
58c5fc13
MT
15872+void module_free_exec(struct module *mod, void *module_region)
15873+{
15874+ module_free(mod, module_region);
15875+}
15876+EXPORT_SYMBOL(module_free_exec);
15877+
15878+void *module_alloc_exec(unsigned long size)
15879+{
15880+ return __module_alloc(size, PAGE_KERNEL_RX);
15881+}
15882+EXPORT_SYMBOL(module_alloc_exec);
15883+#endif
58c5fc13 15884+#endif
ae4e228f 15885+
6e9df6a3
MT
15886 #ifdef CONFIG_X86_32
15887 int apply_relocate(Elf32_Shdr *sechdrs,
15888 const char *strtab,
15889@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
58c5fc13
MT
15890 unsigned int i;
15891 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15892 Elf32_Sym *sym;
15893- uint32_t *location;
15894+ uint32_t *plocation, location;
58c5fc13
MT
15895
15896 DEBUGP("Applying relocate section %u to %u\n", relsec,
15897 sechdrs[relsec].sh_info);
15898 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15899 /* This is where to make the change */
15900- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15901- + rel[i].r_offset;
15902+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15903+ location = (uint32_t)plocation;
15904+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15905+ plocation = ktla_ktva((void *)plocation);
15906 /* This is the symbol it is referring to. Note that all
15907 undefined symbols have been resolved. */
15908 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
6e9df6a3 15909@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
58c5fc13
MT
15910 switch (ELF32_R_TYPE(rel[i].r_info)) {
15911 case R_386_32:
15912 /* We add the value into the location given */
15913- *location += sym->st_value;
ae4e228f 15914+ pax_open_kernel();
58c5fc13 15915+ *plocation += sym->st_value;
ae4e228f 15916+ pax_close_kernel();
58c5fc13
MT
15917 break;
15918 case R_386_PC32:
15919 /* Add the value, subtract its postition */
15920- *location += sym->st_value - (uint32_t)location;
ae4e228f 15921+ pax_open_kernel();
58c5fc13 15922+ *plocation += sym->st_value - location;
ae4e228f 15923+ pax_close_kernel();
58c5fc13
MT
15924 break;
15925 default:
15926 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
fe2de317 15927@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
58c5fc13
MT
15928 case R_X86_64_NONE:
15929 break;
15930 case R_X86_64_64:
ae4e228f 15931+ pax_open_kernel();
58c5fc13 15932 *(u64 *)loc = val;
ae4e228f 15933+ pax_close_kernel();
58c5fc13
MT
15934 break;
15935 case R_X86_64_32:
ae4e228f 15936+ pax_open_kernel();
58c5fc13 15937 *(u32 *)loc = val;
ae4e228f 15938+ pax_close_kernel();
58c5fc13
MT
15939 if (val != *(u32 *)loc)
15940 goto overflow;
15941 break;
15942 case R_X86_64_32S:
ae4e228f 15943+ pax_open_kernel();
58c5fc13 15944 *(s32 *)loc = val;
ae4e228f 15945+ pax_close_kernel();
58c5fc13
MT
15946 if ((s64)val != *(s32 *)loc)
15947 goto overflow;
15948 break;
15949 case R_X86_64_PC32:
15950 val -= (u64)loc;
ae4e228f 15951+ pax_open_kernel();
58c5fc13 15952 *(u32 *)loc = val;
ae4e228f 15953+ pax_close_kernel();
58c5fc13
MT
15954+
15955 #if 0
15956 if ((s64)val != *(s32 *)loc)
15957 goto overflow;
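
Under KERNEXEC the module allocator is split: module_alloc() now returns non-executable PAGE_KERNEL memory for module data, module_alloc_exec() hands out executable-but-not-writable mappings (from the MODULES_EXEC_VADDR window on i386, PAGE_KERNEL_RX on x86-64), and every relocation write into code is wrapped in pax_open_kernel()/pax_close_kernel(). The same W^X discipline expressed in userspace terms, strictly as an analogy:

/* Userspace analogy (not kernel code) for the W^X handling above: code
 * lives in an RX mapping and is made writable only for the moment a
 * "relocation" is patched in, then flipped back. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	unsigned char *code = mmap(NULL, len, PROT_READ | PROT_EXEC,
	                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (code == MAP_FAILED) { perror("mmap"); return 1; }

	mprotect(code, len, PROT_READ | PROT_WRITE);   /* ~ pax_open_kernel()  */
	code[0] = 0xc3;                                /* patch: x86 "ret"     */
	mprotect(code, len, PROT_READ | PROT_EXEC);    /* ~ pax_close_kernel() */

	printf("patched first byte: %#x\n", code[0]);
	munmap(code, len);
	return 0;
}
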
fe2de317
MT
15958diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
15959index 676b8c7..870ba04 100644
15960--- a/arch/x86/kernel/paravirt-spinlocks.c
15961+++ b/arch/x86/kernel/paravirt-spinlocks.c
15962@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
15963 arch_spin_lock(lock);
15964 }
15965
15966-struct pv_lock_ops pv_lock_ops = {
15967+struct pv_lock_ops pv_lock_ops __read_only = {
15968 #ifdef CONFIG_SMP
15969 .spin_is_locked = __ticket_spin_is_locked,
15970 .spin_is_contended = __ticket_spin_is_contended,
15971diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
15972index d90272e..2d54e8e 100644
15973--- a/arch/x86/kernel/paravirt.c
15974+++ b/arch/x86/kernel/paravirt.c
15a11c5b
MT
15975@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15976 {
15977 return x;
15978 }
15979+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15980+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15981+#endif
15982
15983 void __init default_banner(void)
15984 {
fe2de317 15985@@ -133,6 +136,9 @@ static void *get_call_destination(u8 type)
66a7e928
MT
15986 .pv_lock_ops = pv_lock_ops,
15987 #endif
15988 };
15989+
15990+ pax_track_stack();
15991+
15992 return *((void **)&tmpl + type);
15993 }
15994
fe2de317 15995@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
58c5fc13 15996 if (opfunc == NULL)
df50ba0c
MT
15997 /* If there's no function, patch it with a ud2a (BUG) */
15998 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15999- else if (opfunc == _paravirt_nop)
16000+ else if (opfunc == (void *)_paravirt_nop)
16001 /* If the operation is a nop, then nop the callsite */
16002 ret = paravirt_patch_nop();
16003
16004 /* identity functions just return their single argument */
16005- else if (opfunc == _paravirt_ident_32)
16006+ else if (opfunc == (void *)_paravirt_ident_32)
16007 ret = paravirt_patch_ident_32(insnbuf, len);
16008- else if (opfunc == _paravirt_ident_64)
16009+ else if (opfunc == (void *)_paravirt_ident_64)
16010 ret = paravirt_patch_ident_64(insnbuf, len);
15a11c5b
MT
16011+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16012+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16013+ ret = paravirt_patch_ident_64(insnbuf, len);
16014+#endif
df50ba0c
MT
16015
16016 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15a11c5b 16017 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
fe2de317 16018@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
58c5fc13
MT
16019 if (insn_len > len || start == NULL)
16020 insn_len = len;
16021 else
16022- memcpy(insnbuf, start, insn_len);
16023+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16024
16025 return insn_len;
16026 }
6e9df6a3 16027@@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
58c5fc13
MT
16028 preempt_enable();
16029 }
16030
16031-struct pv_info pv_info = {
16032+struct pv_info pv_info __read_only = {
16033 .name = "bare hardware",
16034 .paravirt_enabled = 0,
16035 .kernel_rpl = 0,
6e9df6a3
MT
16036@@ -313,16 +323,16 @@ struct pv_info pv_info = {
16037 #endif
58c5fc13
MT
16038 };
16039
16040-struct pv_init_ops pv_init_ops = {
16041+struct pv_init_ops pv_init_ops __read_only = {
16042 .patch = native_patch,
58c5fc13
MT
16043 };
16044
16045-struct pv_time_ops pv_time_ops = {
16046+struct pv_time_ops pv_time_ops __read_only = {
ae4e228f 16047 .sched_clock = native_sched_clock,
6e9df6a3 16048 .steal_clock = native_steal_clock,
58c5fc13
MT
16049 };
16050
16051-struct pv_irq_ops pv_irq_ops = {
16052+struct pv_irq_ops pv_irq_ops __read_only = {
58c5fc13
MT
16053 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16054 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
ae4e228f 16055 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
6e9df6a3 16056@@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
58c5fc13
MT
16057 #endif
16058 };
16059
16060-struct pv_cpu_ops pv_cpu_ops = {
16061+struct pv_cpu_ops pv_cpu_ops __read_only = {
16062 .cpuid = native_cpuid,
16063 .get_debugreg = native_get_debugreg,
16064 .set_debugreg = native_set_debugreg,
6e9df6a3 16065@@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
58c5fc13
MT
16066 .end_context_switch = paravirt_nop,
16067 };
16068
16069-struct pv_apic_ops pv_apic_ops = {
16070+struct pv_apic_ops pv_apic_ops __read_only = {
16071 #ifdef CONFIG_X86_LOCAL_APIC
ae4e228f
MT
16072 .startup_ipi_hook = paravirt_nop,
16073 #endif
15a11c5b
MT
16074 };
16075
16076-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16077+#ifdef CONFIG_X86_32
16078+#ifdef CONFIG_X86_PAE
16079+/* 64-bit pagetable entries */
16080+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16081+#else
16082 /* 32-bit pagetable entries */
16083 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16084+#endif
16085 #else
16086 /* 64-bit pagetable entries */
58c5fc13
MT
16087 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16088 #endif
16089
16090-struct pv_mmu_ops pv_mmu_ops = {
16091+struct pv_mmu_ops pv_mmu_ops __read_only = {
ae4e228f
MT
16092
16093 .read_cr2 = native_read_cr2,
16094 .write_cr2 = native_write_cr2,
6e9df6a3 16095@@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
15a11c5b
MT
16096 .make_pud = PTE_IDENT,
16097
16098 .set_pgd = native_set_pgd,
16099+ .set_pgd_batched = native_set_pgd_batched,
16100 #endif
16101 #endif /* PAGETABLE_LEVELS >= 3 */
16102
16103@@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16104 },
16105
16106 .set_fixmap = native_set_fixmap,
16107+
16108+#ifdef CONFIG_PAX_KERNEXEC
16109+ .pax_open_kernel = native_pax_open_kernel,
16110+ .pax_close_kernel = native_pax_close_kernel,
16111+#endif
16112+
16113 };
16114
16115 EXPORT_SYMBOL_GPL(pv_time_ops);
16116diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16117index 35ccf75..67e7d4d 100644
16118--- a/arch/x86/kernel/pci-iommu_table.c
16119+++ b/arch/x86/kernel/pci-iommu_table.c
16120@@ -2,7 +2,7 @@
16121 #include <asm/iommu_table.h>
16122 #include <linux/string.h>
16123 #include <linux/kallsyms.h>
16124-
16125+#include <linux/sched.h>
16126
16127 #define DEBUG 1
16128
16129@@ -51,6 +51,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
16130 {
16131 struct iommu_table_entry *p, *q, *x;
16132
16133+ pax_track_stack();
16134+
16135 /* Simple cyclic dependency checker. */
16136 for (p = start; p < finish; p++) {
16137 q = find_dependents_of(start, finish, p);
16138diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16139index e7e3b01..43c5af3 100644
16140--- a/arch/x86/kernel/process.c
16141+++ b/arch/x86/kernel/process.c
16142@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16143
16144 void free_thread_info(struct thread_info *ti)
16145 {
16146- free_thread_xstate(ti->task);
16147 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16148 }
16149
16150+static struct kmem_cache *task_struct_cachep;
16151+
16152 void arch_task_cache_init(void)
16153 {
16154- task_xstate_cachep =
16155- kmem_cache_create("task_xstate", xstate_size,
16156+ /* create a slab on which task_structs can be allocated */
16157+ task_struct_cachep =
16158+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16159+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16160+
16161+ task_xstate_cachep =
16162+ kmem_cache_create("task_xstate", xstate_size,
16163 __alignof__(union thread_xstate),
16164- SLAB_PANIC | SLAB_NOTRACK, NULL);
16165+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16166+}
16167+
16168+struct task_struct *alloc_task_struct_node(int node)
16169+{
16170+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16171+}
16172+
16173+void free_task_struct(struct task_struct *task)
16174+{
16175+ free_thread_xstate(task);
16176+ kmem_cache_free(task_struct_cachep, task);
16177 }
16178
16179 /*
16180@@ -70,7 +87,7 @@ void exit_thread(void)
16181 unsigned long *bp = t->io_bitmap_ptr;
16182
16183 if (bp) {
16184- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16185+ struct tss_struct *tss = init_tss + get_cpu();
16186
16187 t->io_bitmap_ptr = NULL;
16188 clear_thread_flag(TIF_IO_BITMAP);
16189@@ -106,7 +123,7 @@ void show_regs_common(void)
16190
16191 printk(KERN_CONT "\n");
16192 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16193- current->pid, current->comm, print_tainted(),
16194+ task_pid_nr(current), current->comm, print_tainted(),
16195 init_utsname()->release,
16196 (int)strcspn(init_utsname()->version, " "),
16197 init_utsname()->version);
16198@@ -120,6 +137,9 @@ void flush_thread(void)
16199 {
16200 struct task_struct *tsk = current;
16201
16202+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16203+ loadsegment(gs, 0);
16204+#endif
16205 flush_ptrace_hw_breakpoint(tsk);
16206 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16207 /*
16208@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16209 regs.di = (unsigned long) arg;
16210
16211 #ifdef CONFIG_X86_32
16212- regs.ds = __USER_DS;
16213- regs.es = __USER_DS;
16214+ regs.ds = __KERNEL_DS;
16215+ regs.es = __KERNEL_DS;
16216 regs.fs = __KERNEL_PERCPU;
16217- regs.gs = __KERNEL_STACK_CANARY;
16218+ savesegment(gs, regs.gs);
16219 #else
16220 regs.ss = __KERNEL_DS;
16221 #endif
16222@@ -403,7 +423,7 @@ void default_idle(void)
16223 EXPORT_SYMBOL(default_idle);
16224 #endif
16225
16226-void stop_this_cpu(void *dummy)
16227+__noreturn void stop_this_cpu(void *dummy)
16228 {
16229 local_irq_disable();
16230 /*
16231@@ -645,16 +665,37 @@ static int __init idle_setup(char *str)
16232 }
16233 early_param("idle", idle_setup);
16234
16235-unsigned long arch_align_stack(unsigned long sp)
16236+#ifdef CONFIG_PAX_RANDKSTACK
16237+void pax_randomize_kstack(struct pt_regs *regs)
16238 {
16239- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16240- sp -= get_random_int() % 8192;
16241- return sp & ~0xf;
16242-}
16243+ struct thread_struct *thread = &current->thread;
16244+ unsigned long time;
16245
16246-unsigned long arch_randomize_brk(struct mm_struct *mm)
16247-{
16248- unsigned long range_end = mm->brk + 0x02000000;
16249- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16250-}
16251+ if (!randomize_va_space)
16252+ return;
16253+
16254+ if (v8086_mode(regs))
16255+ return;
16256
16257+ rdtscl(time);
16258+
16259+ /* P4 seems to return a 0 LSB, ignore it */
16260+#ifdef CONFIG_MPENTIUM4
16261+ time &= 0x3EUL;
16262+ time <<= 2;
16263+#elif defined(CONFIG_X86_64)
16264+ time &= 0xFUL;
16265+ time <<= 4;
16266+#else
16267+ time &= 0x1FUL;
16268+ time <<= 3;
16269+#endif
16270+
16271+ thread->sp0 ^= time;
16272+ load_sp0(init_tss + smp_processor_id(), thread);
16273+
16274+#ifdef CONFIG_X86_64
16275+ percpu_write(kernel_stack, thread->sp0);
16276+#endif
16277+}
16278+#endif
16279diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16280index 7a3b651..5a946f6 100644
16281--- a/arch/x86/kernel/process_32.c
16282+++ b/arch/x86/kernel/process_32.c
16283@@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16284 unsigned long thread_saved_pc(struct task_struct *tsk)
16285 {
16286 return ((unsigned long *)tsk->thread.sp)[3];
16287+//XXX return tsk->thread.eip;
16288 }
16289
16290 #ifndef CONFIG_SMP
16291@@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, int all)
16292 unsigned long sp;
16293 unsigned short ss, gs;
16294
16295- if (user_mode_vm(regs)) {
16296+ if (user_mode(regs)) {
16297 sp = regs->sp;
16298 ss = regs->ss & 0xffff;
16299- gs = get_user_gs(regs);
16300 } else {
16301 sp = kernel_stack_pointer(regs);
16302 savesegment(ss, ss);
16303- savesegment(gs, gs);
16304 }
16305+ gs = get_user_gs(regs);
16306
16307 show_regs_common();
16308
16309@@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16310 struct task_struct *tsk;
16311 int err;
16312
16313- childregs = task_pt_regs(p);
16314+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16315 *childregs = *regs;
16316 childregs->ax = 0;
16317 childregs->sp = sp;
16318
16319 p->thread.sp = (unsigned long) childregs;
16320 p->thread.sp0 = (unsigned long) (childregs+1);
16321+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16322
16323 p->thread.ip = (unsigned long) ret_from_fork;
16324
16325@@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16326 struct thread_struct *prev = &prev_p->thread,
16327 *next = &next_p->thread;
16328 int cpu = smp_processor_id();
16329- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16330+ struct tss_struct *tss = init_tss + cpu;
16331 bool preload_fpu;
16332
16333 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16334@@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16335 */
16336 lazy_save_gs(prev->gs);
16337
16338+#ifdef CONFIG_PAX_MEMORY_UDEREF
16339+ __set_fs(task_thread_info(next_p)->addr_limit);
16340+#endif
16341+
16342 /*
16343 * Load the per-thread Thread-Local Storage descriptor.
16344 */
16345@@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16346 */
16347 arch_end_context_switch(next_p);
16348
16349+ percpu_write(current_task, next_p);
16350+ percpu_write(current_tinfo, &next_p->tinfo);
16351+
16352 if (preload_fpu)
16353 __math_state_restore();
16354
16355@@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16356 if (prev->gs | next->gs)
16357 lazy_load_gs(next->gs);
16358
16359- percpu_write(current_task, next_p);
16360-
16361 return prev_p;
16362 }
16363
16364@@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_struct *p)
16365 } while (count++ < 16);
16366 return 0;
16367 }
16368-
16369diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16370index f693e44..3c979b2 100644
16371--- a/arch/x86/kernel/process_64.c
16372+++ b/arch/x86/kernel/process_64.c
16373@@ -88,7 +88,7 @@ static void __exit_idle(void)
16374 void exit_idle(void)
16375 {
16376 /* idle loop has pid 0 */
16377- if (current->pid)
16378+ if (task_pid_nr(current))
16379 return;
16380 __exit_idle();
16381 }
16382@@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16383 struct pt_regs *childregs;
16384 struct task_struct *me = current;
16385
16386- childregs = ((struct pt_regs *)
16387- (THREAD_SIZE + task_stack_page(p))) - 1;
16388+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16389 *childregs = *regs;
16390
16391 childregs->ax = 0;
16392@@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16393 p->thread.sp = (unsigned long) childregs;
16394 p->thread.sp0 = (unsigned long) (childregs+1);
16395 p->thread.usersp = me->thread.usersp;
16396+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16397
16398 set_tsk_thread_flag(p, TIF_FORK);
16399
16400@@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16401 struct thread_struct *prev = &prev_p->thread;
16402 struct thread_struct *next = &next_p->thread;
16403 int cpu = smp_processor_id();
16404- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16405+ struct tss_struct *tss = init_tss + cpu;
16406 unsigned fsindex, gsindex;
16407 bool preload_fpu;
16408
16409@@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16410 prev->usersp = percpu_read(old_rsp);
16411 percpu_write(old_rsp, next->usersp);
16412 percpu_write(current_task, next_p);
16413+ percpu_write(current_tinfo, &next_p->tinfo);
16414
16415- percpu_write(kernel_stack,
16416- (unsigned long)task_stack_page(next_p) +
16417- THREAD_SIZE - KERNEL_STACK_OFFSET);
16418+ percpu_write(kernel_stack, next->sp0);
16419
16420 /*
16421 * Now maybe reload the debug registers and handle I/O bitmaps
16422@@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_struct *p)
16423 if (!p || p == current || p->state == TASK_RUNNING)
16424 return 0;
16425 stack = (unsigned long)task_stack_page(p);
16426- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16427+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16428 return 0;
16429 fp = *(u64 *)(p->thread.sp);
16430 do {
16431- if (fp < (unsigned long)stack ||
16432- fp >= (unsigned long)stack+THREAD_SIZE)
16433+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16434 return 0;
16435 ip = *(u64 *)(fp+8);
16436 if (!in_sched_functions(ip))
16437diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16438index 8252879..d3219e0 100644
16439--- a/arch/x86/kernel/ptrace.c
16440+++ b/arch/x86/kernel/ptrace.c
16441@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16442 unsigned long addr, unsigned long data)
16443 {
16444 int ret;
16445- unsigned long __user *datap = (unsigned long __user *)data;
16446+ unsigned long __user *datap = (__force unsigned long __user *)data;
16447
16448 switch (request) {
16449 /* read the word at location addr in the USER area. */
16450@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16451 if ((int) addr < 0)
16452 return -EIO;
16453 ret = do_get_thread_area(child, addr,
16454- (struct user_desc __user *)data);
16455+ (__force struct user_desc __user *) data);
16456 break;
16457
16458 case PTRACE_SET_THREAD_AREA:
16459 if ((int) addr < 0)
16460 return -EIO;
16461 ret = do_set_thread_area(child, addr,
16462- (struct user_desc __user *)data, 0);
16463+ (__force struct user_desc __user *) data, 0);
16464 break;
16465 #endif
16466
16467@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16468 memset(info, 0, sizeof(*info));
16469 info->si_signo = SIGTRAP;
16470 info->si_code = si_code;
16471- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16472+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16473 }
16474
16475 void user_single_step_siginfo(struct task_struct *tsk,
16476diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16477index 42eb330..139955c 100644
16478--- a/arch/x86/kernel/pvclock.c
16479+++ b/arch/x86/kernel/pvclock.c
16480@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16481 return pv_tsc_khz;
16482 }
16483
16484-static atomic64_t last_value = ATOMIC64_INIT(0);
16485+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16486
16487 void pvclock_resume(void)
16488 {
16489- atomic64_set(&last_value, 0);
16490+ atomic64_set_unchecked(&last_value, 0);
16491 }
16492
16493 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16494@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16495 * updating at the same time, and one of them could be slightly behind,
16496 * making the assumption that last_value always go forward fail to hold.
16497 */
16498- last = atomic64_read(&last_value);
16499+ last = atomic64_read_unchecked(&last_value);
16500 do {
16501 if (ret < last)
16502 return last;
16503- last = atomic64_cmpxchg(&last_value, last, ret);
16504+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16505 } while (unlikely(last != ret));
16506
16507 return ret;
16508diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16509index d4a705f..ef8f1a9 100644
16510--- a/arch/x86/kernel/reboot.c
16511+++ b/arch/x86/kernel/reboot.c
16512@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16513 EXPORT_SYMBOL(pm_power_off);
16514
16515 static const struct desc_ptr no_idt = {};
16516-static int reboot_mode;
16517+static unsigned short reboot_mode;
16518 enum reboot_type reboot_type = BOOT_ACPI;
16519 int reboot_force;
16520
16521@@ -324,13 +324,17 @@ core_initcall(reboot_init);
16522 extern const unsigned char machine_real_restart_asm[];
16523 extern const u64 machine_real_restart_gdt[3];
16524
16525-void machine_real_restart(unsigned int type)
16526+__noreturn void machine_real_restart(unsigned int type)
16527 {
16528 void *restart_va;
16529 unsigned long restart_pa;
16530- void (*restart_lowmem)(unsigned int);
16531+ void (* __noreturn restart_lowmem)(unsigned int);
16532 u64 *lowmem_gdt;
16533
16534+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16535+ struct desc_struct *gdt;
16536+#endif
16537+
16538 local_irq_disable();
16539
16540 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16541@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16542 boot)". This seems like a fairly standard thing that gets set by
16543 REBOOT.COM programs, and the previous reset routine did this
16544 too. */
16545- *((unsigned short *)0x472) = reboot_mode;
16546+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16547
16548 /* Patch the GDT in the low memory trampoline */
16549 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16550
16551 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16552 restart_pa = virt_to_phys(restart_va);
16553- restart_lowmem = (void (*)(unsigned int))restart_pa;
16554+ restart_lowmem = (void *)restart_pa;
16555
16556 /* GDT[0]: GDT self-pointer */
16557 lowmem_gdt[0] =
16558@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16559 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16560
16561 /* Jump to the identity-mapped low memory code */
16562+
16563+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16564+ gdt = get_cpu_gdt_table(smp_processor_id());
16565+ pax_open_kernel();
16566+#ifdef CONFIG_PAX_MEMORY_UDEREF
16567+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16568+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16569+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16570+#endif
16571+#ifdef CONFIG_PAX_KERNEXEC
16572+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16573+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16574+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16575+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16576+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16577+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16578+#endif
16579+ pax_close_kernel();
16580+#endif
16581+
16582+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16583+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16584+ unreachable();
16585+#else
16586 restart_lowmem(type);
16587+#endif
16588+
16589 }
16590 #ifdef CONFIG_APM_MODULE
16591 EXPORT_SYMBOL(machine_real_restart);
16592@@ -532,7 +562,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16593 * try to force a triple fault and then cycle between hitting the keyboard
16594 * controller and doing that
16595 */
16596-static void native_machine_emergency_restart(void)
16597+__noreturn static void native_machine_emergency_restart(void)
16598 {
16599 int i;
16600 int attempt = 0;
16601@@ -656,13 +686,13 @@ void native_machine_shutdown(void)
16602 #endif
16603 }
16604
16605-static void __machine_emergency_restart(int emergency)
16606+static __noreturn void __machine_emergency_restart(int emergency)
16607 {
16608 reboot_emergency = emergency;
16609 machine_ops.emergency_restart();
16610 }
16611
16612-static void native_machine_restart(char *__unused)
16613+static __noreturn void native_machine_restart(char *__unused)
16614 {
16615 printk("machine restart\n");
16616
16617@@ -671,7 +701,7 @@ static void native_machine_restart(char *__unused)
16618 __machine_emergency_restart(0);
16619 }
16620
16621-static void native_machine_halt(void)
16622+static __noreturn void native_machine_halt(void)
16623 {
16624 /* stop other cpus and apics */
16625 machine_shutdown();
16626@@ -682,7 +712,7 @@ static void native_machine_halt(void)
16627 stop_this_cpu(NULL);
16628 }
16629
16630-static void native_machine_power_off(void)
16631+__noreturn static void native_machine_power_off(void)
16632 {
16633 if (pm_power_off) {
16634 if (!reboot_force)
16635@@ -691,6 +721,7 @@ static void native_machine_power_off(void)
16636 }
16637 /* a fallback in case there is no PM info available */
16638 tboot_shutdown(TB_SHUTDOWN_HALT);
16639+ unreachable();
16640 }
16641
16642 struct machine_ops machine_ops = {
16643diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16644index 7a6f3b3..bed145d7 100644
16645--- a/arch/x86/kernel/relocate_kernel_64.S
16646+++ b/arch/x86/kernel/relocate_kernel_64.S
16647@@ -11,6 +11,7 @@
16648 #include <asm/kexec.h>
16649 #include <asm/processor-flags.h>
16650 #include <asm/pgtable_types.h>
16651+#include <asm/alternative-asm.h>
16652
16653 /*
16654 * Must be relocatable PIC code callable as a C function
16655@@ -160,13 +161,14 @@ identity_mapped:
16656 xorq %rbp, %rbp
16657 xorq %r8, %r8
16658 xorq %r9, %r9
16659- xorq %r10, %r9
16660+ xorq %r10, %r10
16661 xorq %r11, %r11
16662 xorq %r12, %r12
16663 xorq %r13, %r13
16664 xorq %r14, %r14
16665 xorq %r15, %r15
16666
16667+ pax_force_retaddr 0, 1
16668 ret
16669
16670 1:
16671diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16672index afaf384..1a101fe 100644
16673--- a/arch/x86/kernel/setup.c
16674+++ b/arch/x86/kernel/setup.c
16675@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16676
16677 switch (data->type) {
16678 case SETUP_E820_EXT:
16679- parse_e820_ext(data);
16680+ parse_e820_ext((struct setup_data __force_kernel *)data);
16681 break;
16682 case SETUP_DTB:
16683 add_dtb(pa_data);
16684@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16685 * area (640->1Mb) as ram even though it is not.
16686 * take them out.
16687 */
16688- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16689+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16690 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16691 }
16692
16693@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16694
16695 if (!boot_params.hdr.root_flags)
16696 root_mountflags &= ~MS_RDONLY;
16697- init_mm.start_code = (unsigned long) _text;
16698- init_mm.end_code = (unsigned long) _etext;
16699+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16700+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16701 init_mm.end_data = (unsigned long) _edata;
16702 init_mm.brk = _brk_end;
16703
16704- code_resource.start = virt_to_phys(_text);
16705- code_resource.end = virt_to_phys(_etext)-1;
16706- data_resource.start = virt_to_phys(_etext);
16707+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16708+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16709+ data_resource.start = virt_to_phys(_sdata);
16710 data_resource.end = virt_to_phys(_edata)-1;
16711 bss_resource.start = virt_to_phys(&__bss_start);
16712 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16713diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16714index 71f4727..16dc9f7 100644
16715--- a/arch/x86/kernel/setup_percpu.c
16716+++ b/arch/x86/kernel/setup_percpu.c
16717@@ -21,19 +21,17 @@
16718 #include <asm/cpu.h>
16719 #include <asm/stackprotector.h>
16720
16721-DEFINE_PER_CPU(int, cpu_number);
16722+#ifdef CONFIG_SMP
16723+DEFINE_PER_CPU(unsigned int, cpu_number);
16724 EXPORT_PER_CPU_SYMBOL(cpu_number);
16725+#endif
16726
16727-#ifdef CONFIG_X86_64
16728 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16729-#else
16730-#define BOOT_PERCPU_OFFSET 0
16731-#endif
16732
16733 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16734 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16735
16736-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16737+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16738 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16739 };
16740 EXPORT_SYMBOL(__per_cpu_offset);
16741@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16742 {
16743 #ifdef CONFIG_X86_32
16744 struct desc_struct gdt;
16745+ unsigned long base = per_cpu_offset(cpu);
16746
16747- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16748- 0x2 | DESCTYPE_S, 0x8);
16749- gdt.s = 1;
16750+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16751+ 0x83 | DESCTYPE_S, 0xC);
16752 write_gdt_entry(get_cpu_gdt_table(cpu),
16753 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16754 #endif
16755@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16756 /* alrighty, percpu areas up and running */
16757 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16758 for_each_possible_cpu(cpu) {
16759+#ifdef CONFIG_CC_STACKPROTECTOR
16760+#ifdef CONFIG_X86_32
16761+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16762+#endif
16763+#endif
16764 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16765 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16766 per_cpu(cpu_number, cpu) = cpu;
16767@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16768 */
16769 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16770 #endif
16771+#ifdef CONFIG_CC_STACKPROTECTOR
16772+#ifdef CONFIG_X86_32
16773+ if (!cpu)
16774+ per_cpu(stack_canary.canary, cpu) = canary;
16775+#endif
16776+#endif
16777 /*
16778 * Up to this point, the boot CPU has been using .init.data
16779 * area. Reload any changed state for the boot CPU.
16780diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16781index 54ddaeb2..a6aa4d2 100644
16782--- a/arch/x86/kernel/signal.c
16783+++ b/arch/x86/kernel/signal.c
16784@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16785 * Align the stack pointer according to the i386 ABI,
16786 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16787 */
16788- sp = ((sp + 4) & -16ul) - 4;
16789+ sp = ((sp - 12) & -16ul) - 4;
16790 #else /* !CONFIG_X86_32 */
16791 sp = round_down(sp, 16) - 8;
16792 #endif
16793@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16794 * Return an always-bogus address instead so we will die with SIGSEGV.
16795 */
16796 if (onsigstack && !likely(on_sig_stack(sp)))
16797- return (void __user *)-1L;
16798+ return (__force void __user *)-1L;
16799
16800 /* save i387 state */
16801 if (used_math() && save_i387_xstate(*fpstate) < 0)
16802- return (void __user *)-1L;
16803+ return (__force void __user *)-1L;
16804
16805 return (void __user *)sp;
16806 }
16807@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16808 }
16809
16810 if (current->mm->context.vdso)
16811- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16812+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16813 else
16814- restorer = &frame->retcode;
16815+ restorer = (void __user *)&frame->retcode;
16816 if (ka->sa.sa_flags & SA_RESTORER)
16817 restorer = ka->sa.sa_restorer;
16818
16819@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16820 * reasons and because gdb uses it as a signature to notice
16821 * signal handler stack frames.
16822 */
16823- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16824+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16825
16826 if (err)
16827 return -EFAULT;
16828@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16829 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16830
16831 /* Set up to return from userspace. */
16832- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16833+ if (current->mm->context.vdso)
16834+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16835+ else
16836+ restorer = (void __user *)&frame->retcode;
16837 if (ka->sa.sa_flags & SA_RESTORER)
16838 restorer = ka->sa.sa_restorer;
16839 put_user_ex(restorer, &frame->pretcode);
16840@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16841 * reasons and because gdb uses it as a signature to notice
16842 * signal handler stack frames.
16843 */
16844- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16845+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16846 } put_user_catch(err);
16847
16848 if (err)
16849@@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *regs)
16850 siginfo_t info;
16851 int signr;
16852
16853+ pax_track_stack();
16854+
16855 /*
16856 * We want the common case to go fast, which is why we may in certain
16857 * cases get here from kernel mode. Just return without doing anything
16858@@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *regs)
16859 * X86_32: vm86 regs switched out by assembly code before reaching
16860 * here, so testing against kernel CS suffices.
16861 */
16862- if (!user_mode(regs))
16863+ if (!user_mode_novm(regs))
16864 return;
16865
16866 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16867diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16868index 9f548cb..caf76f7 100644
16869--- a/arch/x86/kernel/smpboot.c
16870+++ b/arch/x86/kernel/smpboot.c
16871@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16872 set_idle_for_cpu(cpu, c_idle.idle);
16873 do_rest:
16874 per_cpu(current_task, cpu) = c_idle.idle;
16875+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16876 #ifdef CONFIG_X86_32
16877 /* Stack for startup_32 can be just as for start_secondary onwards */
16878 irq_ctx_init(cpu);
16879 #else
16880 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16881 initial_gs = per_cpu_offset(cpu);
16882- per_cpu(kernel_stack, cpu) =
16883- (unsigned long)task_stack_page(c_idle.idle) -
16884- KERNEL_STACK_OFFSET + THREAD_SIZE;
16885+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16886 #endif
16887+
16888+ pax_open_kernel();
16889 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16890+ pax_close_kernel();
16891+
16892 initial_code = (unsigned long)start_secondary;
16893 stack_start = c_idle.idle->thread.sp;
16894
16895@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16896
16897 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16898
16899+#ifdef CONFIG_PAX_PER_CPU_PGD
16900+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16901+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16902+ KERNEL_PGD_PTRS);
16903+#endif
16904+
16905 err = do_boot_cpu(apicid, cpu);
16906 if (err) {
16907 pr_debug("do_boot_cpu failed %d\n", err);
16908diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16909index c346d11..d43b163 100644
16910--- a/arch/x86/kernel/step.c
16911+++ b/arch/x86/kernel/step.c
16912@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16913 struct desc_struct *desc;
16914 unsigned long base;
16915
16916- seg &= ~7UL;
16917+ seg >>= 3;
16918
16919 mutex_lock(&child->mm->context.lock);
16920- if (unlikely((seg >> 3) >= child->mm->context.size))
16921+ if (unlikely(seg >= child->mm->context.size))
16922 addr = -1L; /* bogus selector, access would fault */
16923 else {
16924 desc = child->mm->context.ldt + seg;
16925@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16926 addr += base;
16927 }
16928 mutex_unlock(&child->mm->context.lock);
16929- }
16930+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16931+ addr = ktla_ktva(addr);
16932
16933 return addr;
16934 }
16935@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
16936 unsigned char opcode[15];
16937 unsigned long addr = convert_ip_to_linear(child, regs);
16938
16939+ if (addr == -EINVAL)
16940+ return 0;
16941+
16942 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16943 for (i = 0; i < copied; i++) {
16944 switch (opcode[i]) {
16945diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
16946index 0b0cb5f..db6b9ed 100644
16947--- a/arch/x86/kernel/sys_i386_32.c
16948+++ b/arch/x86/kernel/sys_i386_32.c
16949@@ -24,17 +24,224 @@
16950
16951 #include <asm/syscalls.h>
16952
16953-/*
16954- * Do a system call from kernel instead of calling sys_execve so we
16955- * end up with proper pt_regs.
16956- */
16957-int kernel_execve(const char *filename,
16958- const char *const argv[],
16959- const char *const envp[])
16960+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16961 {
16962- long __res;
16963- asm volatile ("int $0x80"
16964- : "=a" (__res)
16965- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
16966- return __res;
16967+ unsigned long pax_task_size = TASK_SIZE;
16968+
16969+#ifdef CONFIG_PAX_SEGMEXEC
16970+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16971+ pax_task_size = SEGMEXEC_TASK_SIZE;
16972+#endif
16973+
16974+ if (len > pax_task_size || addr > pax_task_size - len)
16975+ return -EINVAL;
16976+
16977+ return 0;
16978+}
16979+
16980+unsigned long
16981+arch_get_unmapped_area(struct file *filp, unsigned long addr,
16982+ unsigned long len, unsigned long pgoff, unsigned long flags)
16983+{
16984+ struct mm_struct *mm = current->mm;
16985+ struct vm_area_struct *vma;
16986+ unsigned long start_addr, pax_task_size = TASK_SIZE;
16987+
16988+#ifdef CONFIG_PAX_SEGMEXEC
16989+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16990+ pax_task_size = SEGMEXEC_TASK_SIZE;
16991+#endif
16992+
16993+ pax_task_size -= PAGE_SIZE;
16994+
16995+ if (len > pax_task_size)
16996+ return -ENOMEM;
16997+
16998+ if (flags & MAP_FIXED)
16999+ return addr;
17000+
17001+#ifdef CONFIG_PAX_RANDMMAP
17002+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17003+#endif
17004+
17005+ if (addr) {
17006+ addr = PAGE_ALIGN(addr);
17007+ if (pax_task_size - len >= addr) {
17008+ vma = find_vma(mm, addr);
17009+ if (check_heap_stack_gap(vma, addr, len))
17010+ return addr;
17011+ }
17012+ }
17013+ if (len > mm->cached_hole_size) {
17014+ start_addr = addr = mm->free_area_cache;
17015+ } else {
17016+ start_addr = addr = mm->mmap_base;
17017+ mm->cached_hole_size = 0;
17018+ }
17019+
17020+#ifdef CONFIG_PAX_PAGEEXEC
17021+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17022+ start_addr = 0x00110000UL;
17023+
17024+#ifdef CONFIG_PAX_RANDMMAP
17025+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17026+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17027+#endif
17028+
17029+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17030+ start_addr = addr = mm->mmap_base;
17031+ else
17032+ addr = start_addr;
17033+ }
17034+#endif
17035+
17036+full_search:
17037+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17038+ /* At this point: (!vma || addr < vma->vm_end). */
17039+ if (pax_task_size - len < addr) {
17040+ /*
17041+ * Start a new search - just in case we missed
17042+ * some holes.
17043+ */
17044+ if (start_addr != mm->mmap_base) {
17045+ start_addr = addr = mm->mmap_base;
17046+ mm->cached_hole_size = 0;
17047+ goto full_search;
17048+ }
17049+ return -ENOMEM;
17050+ }
17051+ if (check_heap_stack_gap(vma, addr, len))
17052+ break;
58c5fc13
MT
17053+ if (addr + mm->cached_hole_size < vma->vm_start)
17054+ mm->cached_hole_size = vma->vm_start - addr;
17055+ addr = vma->vm_end;
17056+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17057+ start_addr = addr = mm->mmap_base;
17058+ mm->cached_hole_size = 0;
17059+ goto full_search;
17060+ }
17061+ }
17062+
17063+ /*
17064+ * Remember the place where we stopped the search:
17065+ */
17066+ mm->free_area_cache = addr + len;
17067+ return addr;
17068+}
17069+
17070+unsigned long
17071+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17072+ const unsigned long len, const unsigned long pgoff,
17073+ const unsigned long flags)
17074+{
17075+ struct vm_area_struct *vma;
17076+ struct mm_struct *mm = current->mm;
17077+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17078+
17079+#ifdef CONFIG_PAX_SEGMEXEC
17080+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17081+ pax_task_size = SEGMEXEC_TASK_SIZE;
17082+#endif
17083+
17084+ pax_task_size -= PAGE_SIZE;
17085+
17086+ /* requested length too big for entire address space */
17087+ if (len > pax_task_size)
17088+ return -ENOMEM;
17089+
17090+ if (flags & MAP_FIXED)
17091+ return addr;
17092+
17093+#ifdef CONFIG_PAX_PAGEEXEC
17094+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17095+ goto bottomup;
17096+#endif
17097+
17098+#ifdef CONFIG_PAX_RANDMMAP
17099+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17100+#endif
17101+
17102+ /* requesting a specific address */
17103+ if (addr) {
17104+ addr = PAGE_ALIGN(addr);
17105+ if (pax_task_size - len >= addr) {
17106+ vma = find_vma(mm, addr);
17107+ if (check_heap_stack_gap(vma, addr, len))
17108+ return addr;
17109+ }
17110+ }
17111+
17112+ /* check if free_area_cache is useful for us */
17113+ if (len <= mm->cached_hole_size) {
17114+ mm->cached_hole_size = 0;
17115+ mm->free_area_cache = mm->mmap_base;
17116+ }
17117+
17118+ /* either no address requested or can't fit in requested address hole */
17119+ addr = mm->free_area_cache;
17120+
17121+ /* make sure it can fit in the remaining address space */
17122+ if (addr > len) {
17123+ vma = find_vma(mm, addr-len);
57199397 17124+ if (check_heap_stack_gap(vma, addr - len, len))
58c5fc13
MT
17125+ /* remember the address as a hint for next time */
17126+ return (mm->free_area_cache = addr-len);
17127+ }
17128+
17129+ if (mm->mmap_base < len)
17130+ goto bottomup;
17131+
17132+ addr = mm->mmap_base-len;
17133+
17134+ do {
17135+ /*
17136+ * Lookup failure means no vma is above this address,
17137+ * else if new region fits below vma->vm_start,
17138+ * return with success:
17139+ */
17140+ vma = find_vma(mm, addr);
17141+ if (check_heap_stack_gap(vma, addr, len))
17142+ /* remember the address as a hint for next time */
17143+ return (mm->free_area_cache = addr);
17144+
17145+ /* remember the largest hole we saw so far */
17146+ if (addr + mm->cached_hole_size < vma->vm_start)
17147+ mm->cached_hole_size = vma->vm_start - addr;
17148+
17149+ /* try just below the current vma->vm_start */
17150+ addr = skip_heap_stack_gap(vma, len);
17151+ } while (!IS_ERR_VALUE(addr));
17152+
17153+bottomup:
17154+ /*
17155+ * A failed mmap() very likely causes application failure,
17156+ * so fall back to the bottom-up function here. This scenario
17157+ * can happen with large stack limits and large mmap()
17158+ * allocations.
17159+ */
17160+
17161+#ifdef CONFIG_PAX_SEGMEXEC
17162+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17163+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17164+ else
17165+#endif
17166+
17167+ mm->mmap_base = TASK_UNMAPPED_BASE;
17168+
17169+#ifdef CONFIG_PAX_RANDMMAP
17170+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17171+ mm->mmap_base += mm->delta_mmap;
17172+#endif
17173+
17174+ mm->free_area_cache = mm->mmap_base;
17175+ mm->cached_hole_size = ~0UL;
17176+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17177+ /*
17178+ * Restore the topdown base:
17179+ */
17180+ mm->mmap_base = base;
17181+ mm->free_area_cache = base;
17182+ mm->cached_hole_size = ~0UL;
17183+
17184+ return addr;
17185 }
17186diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17187index ff14a50..35626c3 100644
17188--- a/arch/x86/kernel/sys_x86_64.c
17189+++ b/arch/x86/kernel/sys_x86_64.c
17190@@ -32,8 +32,8 @@ out:
17191 return error;
17192 }
17193
17194-static void find_start_end(unsigned long flags, unsigned long *begin,
17195- unsigned long *end)
17196+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17197+ unsigned long *begin, unsigned long *end)
17198 {
17199 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17200 unsigned long new_begin;
17201@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17202 *begin = new_begin;
17203 }
17204 } else {
17205- *begin = TASK_UNMAPPED_BASE;
17206+ *begin = mm->mmap_base;
17207 *end = TASK_SIZE;
17208 }
17209 }
17210@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17211 if (flags & MAP_FIXED)
17212 return addr;
17213
17214- find_start_end(flags, &begin, &end);
17215+ find_start_end(mm, flags, &begin, &end);
17216
17217 if (len > end)
17218 return -ENOMEM;
17219
17220+#ifdef CONFIG_PAX_RANDMMAP
17221+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17222+#endif
17223+
17224 if (addr) {
17225 addr = PAGE_ALIGN(addr);
17226 vma = find_vma(mm, addr);
17227- if (end - len >= addr &&
17228- (!vma || addr + len <= vma->vm_start))
17229+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17230 return addr;
17231 }
17232 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17233@@ -106,7 +109,7 @@ full_search:
17234 }
17235 return -ENOMEM;
17236 }
17237- if (!vma || addr + len <= vma->vm_start) {
17238+ if (check_heap_stack_gap(vma, addr, len)) {
17239 /*
17240 * Remember the place where we stopped the search:
17241 */
17242@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17243 {
17244 struct vm_area_struct *vma;
17245 struct mm_struct *mm = current->mm;
17246- unsigned long addr = addr0;
17247+ unsigned long base = mm->mmap_base, addr = addr0;
17248
17249 /* requested length too big for entire address space */
17250 if (len > TASK_SIZE)
17251@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17252 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17253 goto bottomup;
17254
17255+#ifdef CONFIG_PAX_RANDMMAP
17256+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17257+#endif
17258+
17259 /* requesting a specific address */
17260 if (addr) {
17261 addr = PAGE_ALIGN(addr);
17262- vma = find_vma(mm, addr);
17263- if (TASK_SIZE - len >= addr &&
17264- (!vma || addr + len <= vma->vm_start))
17265- return addr;
17266+ if (TASK_SIZE - len >= addr) {
17267+ vma = find_vma(mm, addr);
17268+ if (check_heap_stack_gap(vma, addr, len))
17269+ return addr;
17270+ }
17271 }
17272
17273 /* check if free_area_cache is useful for us */
17274@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17275 /* make sure it can fit in the remaining address space */
17276 if (addr > len) {
17277 vma = find_vma(mm, addr-len);
17278- if (!vma || addr <= vma->vm_start)
17279+ if (check_heap_stack_gap(vma, addr - len, len))
17280 /* remember the address as a hint for next time */
17281 return mm->free_area_cache = addr-len;
17282 }
17283@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17284 * return with success:
17285 */
17286 vma = find_vma(mm, addr);
17287- if (!vma || addr+len <= vma->vm_start)
17288+ if (check_heap_stack_gap(vma, addr, len))
17289 /* remember the address as a hint for next time */
17290 return mm->free_area_cache = addr;
17291
17292@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17293 mm->cached_hole_size = vma->vm_start - addr;
17294
17295 /* try just below the current vma->vm_start */
17296- addr = vma->vm_start-len;
17297- } while (len < vma->vm_start);
17298+ addr = skip_heap_stack_gap(vma, len);
17299+ } while (!IS_ERR_VALUE(addr));
17300
17301 bottomup:
17302 /*
17303@@ -198,13 +206,21 @@ bottomup:
17304 * can happen with large stack limits and large mmap()
17305 * allocations.
17306 */
17307+ mm->mmap_base = TASK_UNMAPPED_BASE;
17308+
17309+#ifdef CONFIG_PAX_RANDMMAP
17310+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17311+ mm->mmap_base += mm->delta_mmap;
17312+#endif
17313+
17314+ mm->free_area_cache = mm->mmap_base;
17315 mm->cached_hole_size = ~0UL;
17316- mm->free_area_cache = TASK_UNMAPPED_BASE;
17317 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17318 /*
17319 * Restore the topdown base:
17320 */
17321- mm->free_area_cache = mm->mmap_base;
17322+ mm->mmap_base = base;
17323+ mm->free_area_cache = base;
17324 mm->cached_hole_size = ~0UL;
17325
17326 return addr;
17327diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17328index bc19be3..0f5fbf7 100644
17329--- a/arch/x86/kernel/syscall_table_32.S
17330+++ b/arch/x86/kernel/syscall_table_32.S
17331@@ -1,3 +1,4 @@
17332+.section .rodata,"a",@progbits
17333 ENTRY(sys_call_table)
17334 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17335 .long sys_exit
17336diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17337index e07a2fc..db0369d 100644
17338--- a/arch/x86/kernel/tboot.c
17339+++ b/arch/x86/kernel/tboot.c
17340@@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
17341
17342 void tboot_shutdown(u32 shutdown_type)
17343 {
17344- void (*shutdown)(void);
17345+ void (* __noreturn shutdown)(void);
17346
17347 if (!tboot_enabled())
17348 return;
17349@@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
17350
17351 switch_to_tboot_pt();
17352
17353- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17354+ shutdown = (void *)tboot->shutdown_entry;
17355 shutdown();
17356
17357 /* should not reach here */
17358@@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17359 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17360 }
17361
17362-static atomic_t ap_wfs_count;
17363+static atomic_unchecked_t ap_wfs_count;
17364
17365 static int tboot_wait_for_aps(int num_aps)
17366 {
17367@@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17368 {
17369 switch (action) {
17370 case CPU_DYING:
17371- atomic_inc(&ap_wfs_count);
17372+ atomic_inc_unchecked(&ap_wfs_count);
17373 if (num_online_cpus() == 1)
17374- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17375+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17376 return NOTIFY_BAD;
17377 break;
17378 }
17379@@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
17380
17381 tboot_create_trampoline();
17382
17383- atomic_set(&ap_wfs_count, 0);
17384+ atomic_set_unchecked(&ap_wfs_count, 0);
17385 register_hotcpu_notifier(&tboot_cpu_notifier);
17386 return 0;
17387 }
17388diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17389index 5a64d05..804587b 100644
17390--- a/arch/x86/kernel/time.c
17391+++ b/arch/x86/kernel/time.c
17392@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17393 {
17394 unsigned long pc = instruction_pointer(regs);
17395
17396- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17397+ if (!user_mode(regs) && in_lock_functions(pc)) {
17398 #ifdef CONFIG_FRAME_POINTER
17399- return *(unsigned long *)(regs->bp + sizeof(long));
17400+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17401 #else
17402 unsigned long *sp =
17403 (unsigned long *)kernel_stack_pointer(regs);
17404@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17405 * or above a saved flags. Eflags has bits 22-31 zero,
17406 * kernel addresses don't.
17407 */
17408+
17409+#ifdef CONFIG_PAX_KERNEXEC
17410+ return ktla_ktva(sp[0]);
17411+#else
17412 if (sp[0] >> 22)
17413 return sp[0];
17414 if (sp[1] >> 22)
17415 return sp[1];
17416 #endif
17417+
17418+#endif
17419 }
17420 return pc;
17421 }
17422diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17423index 6bb7b85..dd853e1 100644
17424--- a/arch/x86/kernel/tls.c
17425+++ b/arch/x86/kernel/tls.c
17426@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17427 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17428 return -EINVAL;
17429
17430+#ifdef CONFIG_PAX_SEGMEXEC
17431+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17432+ return -EINVAL;
17433+#endif
17434+
17435 set_tls_desc(p, idx, &info, 1);
17436
17437 return 0;
17438diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17439index 451c0a7..e57f551 100644
17440--- a/arch/x86/kernel/trampoline_32.S
17441+++ b/arch/x86/kernel/trampoline_32.S
17442@@ -32,6 +32,12 @@
17443 #include <asm/segment.h>
17444 #include <asm/page_types.h>
17445
17446+#ifdef CONFIG_PAX_KERNEXEC
17447+#define ta(X) (X)
17448+#else
17449+#define ta(X) ((X) - __PAGE_OFFSET)
17450+#endif
17451+
17452 #ifdef CONFIG_SMP
17453
17454 .section ".x86_trampoline","a"
17455@@ -62,7 +68,7 @@ r_base = .
17456 inc %ax # protected mode (PE) bit
17457 lmsw %ax # into protected mode
17458 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17459- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17460+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17461
17462 # These need to be in the same 64K segment as the above;
17463 # hence we don't use the boot_gdt_descr defined in head.S
17464diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17465index 09ff517..df19fbff 100644
17466--- a/arch/x86/kernel/trampoline_64.S
17467+++ b/arch/x86/kernel/trampoline_64.S
17468@@ -90,7 +90,7 @@ startup_32:
17469 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17470 movl %eax, %ds
17471
17472- movl $X86_CR4_PAE, %eax
17473+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17474 movl %eax, %cr4 # Enable PAE mode
17475
17476 # Setup trampoline 4 level pagetables
17477@@ -138,7 +138,7 @@ tidt:
17478 # so the kernel can live anywhere
17479 .balign 4
17480 tgdt:
17481- .short tgdt_end - tgdt # gdt limit
17482+ .short tgdt_end - tgdt - 1 # gdt limit
17483 .long tgdt - r_base
17484 .short 0
17485 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17486diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17487index 6913369..7e7dff6 100644
17488--- a/arch/x86/kernel/traps.c
17489+++ b/arch/x86/kernel/traps.c
17490@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17491
17492 /* Do we ignore FPU interrupts ? */
17493 char ignore_fpu_irq;
17494-
17495-/*
17496- * The IDT has to be page-aligned to simplify the Pentium
17497- * F0 0F bug workaround.
17498- */
17499-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17500 #endif
17501
17502 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17503@@ -117,13 +111,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17504 }
17505
17506 static void __kprobes
17507-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17508+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17509 long error_code, siginfo_t *info)
17510 {
17511 struct task_struct *tsk = current;
17512
17513 #ifdef CONFIG_X86_32
17514- if (regs->flags & X86_VM_MASK) {
17515+ if (v8086_mode(regs)) {
17516 /*
17517 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17518 * On nmi (interrupt 2), do_trap should not be called.
17519@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17520 }
17521 #endif
17522
17523- if (!user_mode(regs))
17524+ if (!user_mode_novm(regs))
17525 goto kernel_trap;
17526
17527 #ifdef CONFIG_X86_32
17528@@ -157,7 +151,7 @@ trap_signal:
17529 printk_ratelimit()) {
17530 printk(KERN_INFO
17531 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17532- tsk->comm, tsk->pid, str,
17533+ tsk->comm, task_pid_nr(tsk), str,
17534 regs->ip, regs->sp, error_code);
17535 print_vma_addr(" in ", regs->ip);
17536 printk("\n");
17537@@ -174,8 +168,20 @@ kernel_trap:
17538 if (!fixup_exception(regs)) {
17539 tsk->thread.error_code = error_code;
17540 tsk->thread.trap_no = trapnr;
17541+
17542+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17543+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17544+ str = "PAX: suspicious stack segment fault";
17545+#endif
17546+
17547 die(str, regs, error_code);
17548 }
17549+
17550+#ifdef CONFIG_PAX_REFCOUNT
17551+ if (trapnr == 4)
17552+ pax_report_refcount_overflow(regs);
17553+#endif
17554+
17555 return;
17556
17557 #ifdef CONFIG_X86_32
17558@@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17559 conditional_sti(regs);
17560
17561 #ifdef CONFIG_X86_32
17562- if (regs->flags & X86_VM_MASK)
17563+ if (v8086_mode(regs))
17564 goto gp_in_vm86;
17565 #endif
17566
17567 tsk = current;
17568- if (!user_mode(regs))
17569+ if (!user_mode_novm(regs))
17570 goto gp_in_kernel;
17571
17572+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17573+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17574+ struct mm_struct *mm = tsk->mm;
17575+ unsigned long limit;
17576+
17577+ down_write(&mm->mmap_sem);
17578+ limit = mm->context.user_cs_limit;
17579+ if (limit < TASK_SIZE) {
17580+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17581+ up_write(&mm->mmap_sem);
17582+ return;
17583+ }
17584+ up_write(&mm->mmap_sem);
17585+ }
17586+#endif
17587+
17588 tsk->thread.error_code = error_code;
17589 tsk->thread.trap_no = 13;
17590
17591@@ -304,6 +326,13 @@ gp_in_kernel:
17592 if (notify_die(DIE_GPF, "general protection fault", regs,
17593 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17594 return;
17595+
17596+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17597+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17598+ die("PAX: suspicious general protection fault", regs, error_code);
17599+ else
17600+#endif
17601+
17602 die("general protection fault", regs, error_code);
17603 }
17604
17605@@ -433,6 +462,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
17606 dotraplinkage notrace __kprobes void
17607 do_nmi(struct pt_regs *regs, long error_code)
17608 {
17609+
17610+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17611+ if (!user_mode(regs)) {
17612+ unsigned long cs = regs->cs & 0xFFFF;
17613+ unsigned long ip = ktva_ktla(regs->ip);
17614+
17615+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17616+ regs->ip = ip;
17617+ }
17618+#endif
17619+
17620 nmi_enter();
17621
17622 inc_irq_stat(__nmi_count);
17623@@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17624 /* It's safe to allow irq's after DR6 has been saved */
17625 preempt_conditional_sti(regs);
17626
17627- if (regs->flags & X86_VM_MASK) {
17628+ if (v8086_mode(regs)) {
17629 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17630 error_code, 1);
17631 preempt_conditional_cli(regs);
17632@@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17633 * We already checked v86 mode above, so we can check for kernel mode
17634 * by just checking the CPL of CS.
17635 */
17636- if ((dr6 & DR_STEP) && !user_mode(regs)) {
17637+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17638 tsk->thread.debugreg6 &= ~DR_STEP;
17639 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17640 regs->flags &= ~X86_EFLAGS_TF;
fe2de317 17641@@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
58c5fc13 17642 return;
17643 conditional_sti(regs);
17644
17645- if (!user_mode_vm(regs))
17646+ if (!user_mode(regs))
17647 {
17648 if (!fixup_exception(regs)) {
17649 task->thread.error_code = error_code;
fe2de317 17650@@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17651 void __math_state_restore(void)
17652 {
17653 struct thread_info *thread = current_thread_info();
17654- struct task_struct *tsk = thread->task;
17655+ struct task_struct *tsk = current;
17656
17657 /*
17658 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15a11c5b 17659@@ -750,8 +790,7 @@ void __math_state_restore(void)
17660 */
17661 asmlinkage void math_state_restore(void)
17662 {
17663- struct thread_info *thread = current_thread_info();
17664- struct task_struct *tsk = thread->task;
17665+ struct task_struct *tsk = current;
58c5fc13 17666
17667 if (!tsk_used_math(tsk)) {
17668 local_irq_enable();
17669diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17670index b9242ba..50c5edd 100644
17671--- a/arch/x86/kernel/verify_cpu.S
17672+++ b/arch/x86/kernel/verify_cpu.S
17673@@ -20,6 +20,7 @@
17674 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17675 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17676 * arch/x86/kernel/head_32.S: processor startup
17677+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17678 *
17679 * verify_cpu, returns the status of longmode and SSE in register %eax.
17680 * 0: Success 1: Failure
17681diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17682index 863f875..4307295 100644
17683--- a/arch/x86/kernel/vm86_32.c
17684+++ b/arch/x86/kernel/vm86_32.c
17685@@ -41,6 +41,7 @@
17686 #include <linux/ptrace.h>
17687 #include <linux/audit.h>
17688 #include <linux/stddef.h>
17689+#include <linux/grsecurity.h>
17690
17691 #include <asm/uaccess.h>
17692 #include <asm/io.h>
fe2de317 17693@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17694 do_exit(SIGSEGV);
17695 }
17696
17697- tss = &per_cpu(init_tss, get_cpu());
17698+ tss = init_tss + get_cpu();
17699 current->thread.sp0 = current->thread.saved_sp0;
17700 current->thread.sysenter_cs = __KERNEL_CS;
17701 load_sp0(tss, &current->thread);
fe2de317 17702@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17703 struct task_struct *tsk;
17704 int tmp, ret = -EPERM;
17705
17706+#ifdef CONFIG_GRKERNSEC_VM86
17707+ if (!capable(CAP_SYS_RAWIO)) {
17708+ gr_handle_vm86();
17709+ goto out;
17710+ }
17711+#endif
17712+
17713 tsk = current;
17714 if (tsk->thread.saved_sp0)
17715 goto out;
fe2de317 17716@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17717 int tmp, ret;
17718 struct vm86plus_struct __user *v86;
17719
17720+#ifdef CONFIG_GRKERNSEC_VM86
17721+ if (!capable(CAP_SYS_RAWIO)) {
17722+ gr_handle_vm86();
17723+ ret = -EPERM;
17724+ goto out;
17725+ }
17726+#endif
17727+
17728 tsk = current;
17729 switch (cmd) {
17730 case VM86_REQUEST_IRQ:
fe2de317 17731@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17732 tsk->thread.saved_fs = info->regs32->fs;
17733 tsk->thread.saved_gs = get_user_gs(info->regs32);
17734
17735- tss = &per_cpu(init_tss, get_cpu());
17736+ tss = init_tss + get_cpu();
17737 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17738 if (cpu_has_sep)
17739 tsk->thread.sysenter_cs = 0;
fe2de317 17740@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17741 goto cannot_handle;
17742 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17743 goto cannot_handle;
17744- intr_ptr = (unsigned long __user *) (i << 2);
17745+ intr_ptr = (__force unsigned long __user *) (i << 2);
17746 if (get_user(segoffs, intr_ptr))
17747 goto cannot_handle;
17748 if ((segoffs >> 16) == BIOSSEG)
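Note on the vm86_32.c hunks above: with CONFIG_GRKERNSEC_VM86 enabled, both sys_vm86old() and sys_vm86() refuse callers that lack CAP_SYS_RAWIO, log the attempt through gr_handle_vm86(), and return -EPERM before any vm86 state is touched. A hypothetical user-space probe along the following lines (32-bit x86 only; the syscall number and the NULL argument are illustrative assumptions, not part of the patch) would observe EPERM instead of the usual EFAULT once the gate is active:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <errno.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
    #ifdef __NR_vm86old
        /* Passing NULL normally fails with EFAULT in copy_from_user();
         * with the GRKERNSEC_VM86 gate an unprivileged caller is refused
         * with EPERM before the argument is ever examined. */
        long ret = syscall(__NR_vm86old, (void *)0);
        printf("vm86old: ret=%ld errno=%s\n", ret, strerror(errno));
    #else
        puts("vm86old is only available on 32-bit x86");
    #endif
        return 0;
    }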
17749diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17750index 0f703f1..9e15f64 100644
17751--- a/arch/x86/kernel/vmlinux.lds.S
17752+++ b/arch/x86/kernel/vmlinux.lds.S
57199397 17753@@ -26,6 +26,13 @@
17754 #include <asm/page_types.h>
17755 #include <asm/cache.h>
17756 #include <asm/boot.h>
17757+#include <asm/segment.h>
17758+
17759+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17760+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17761+#else
17762+#define __KERNEL_TEXT_OFFSET 0
17763+#endif
17764
17765 #undef i386 /* in case the preprocessor is a 32bit one */
17766
6e9df6a3 17767@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
ae4e228f 17768
17769 PHDRS {
17770 text PT_LOAD FLAGS(5); /* R_E */
17771+#ifdef CONFIG_X86_32
17772+ module PT_LOAD FLAGS(5); /* R_E */
17773+#endif
17774+#ifdef CONFIG_XEN
17775+ rodata PT_LOAD FLAGS(5); /* R_E */
17776+#else
58c5fc13 17777+ rodata PT_LOAD FLAGS(4); /* R__ */
ae4e228f 17778+#endif
16454cff 17779 data PT_LOAD FLAGS(6); /* RW_ */
6e9df6a3 17780-#ifdef CONFIG_X86_64
17781+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17782 #ifdef CONFIG_SMP
ae4e228f 17783 percpu PT_LOAD FLAGS(6); /* RW_ */
17784 #endif
17785+ text.init PT_LOAD FLAGS(5); /* R_E */
17786+ text.exit PT_LOAD FLAGS(5); /* R_E */
17787 init PT_LOAD FLAGS(7); /* RWE */
17788-#endif
17789 note PT_NOTE FLAGS(0); /* ___ */
17790 }
17791
17792 SECTIONS
17793 {
17794 #ifdef CONFIG_X86_32
17795- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17796- phys_startup_32 = startup_32 - LOAD_OFFSET;
17797+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17798 #else
17799- . = __START_KERNEL;
17800- phys_startup_64 = startup_64 - LOAD_OFFSET;
17801+ . = __START_KERNEL;
17802 #endif
17803
17804 /* Text and read-only data */
17805- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17806- _text = .;
58c5fc13 17807+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
ae4e228f 17808 /* bootstrapping code */
17809+#ifdef CONFIG_X86_32
17810+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17811+#else
17812+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17813+#endif
17814+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17815+ _text = .;
17816 HEAD_TEXT
58c5fc13 17817 #ifdef CONFIG_X86_32
58c5fc13 17818 . = ALIGN(PAGE_SIZE);
6e9df6a3 17819@@ -108,13 +128,47 @@ SECTIONS
17820 IRQENTRY_TEXT
17821 *(.fixup)
17822 *(.gnu.warning)
17823- /* End of text section */
17824- _etext = .;
17825 } :text = 0x9090
17826
17827- NOTES :text :note
17828+ . += __KERNEL_TEXT_OFFSET;
17829
17830- EXCEPTION_TABLE(16) :text = 0x9090
17831+#ifdef CONFIG_X86_32
17832+ . = ALIGN(PAGE_SIZE);
58c5fc13 17833+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17834+
17835+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17836+ MODULES_EXEC_VADDR = .;
17837+ BYTE(0)
ae4e228f 17838+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
57199397 17839+ . = ALIGN(HPAGE_SIZE);
58c5fc13 17840+ MODULES_EXEC_END = . - 1;
58c5fc13 17841+#endif
17842+
17843+ } :module
17844+#endif
17845+
57199397 17846+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17847+ /* End of text section */
17848+ _etext = . - __KERNEL_TEXT_OFFSET;
57199397 17849+ }
15a11c5b 17850+
17851+#ifdef CONFIG_X86_32
17852+ . = ALIGN(PAGE_SIZE);
17853+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17854+ *(.idt)
17855+ . = ALIGN(PAGE_SIZE);
17856+ *(.empty_zero_page)
17857+ *(.initial_pg_fixmap)
17858+ *(.initial_pg_pmd)
17859+ *(.initial_page_table)
17860+ *(.swapper_pg_dir)
17861+ } :rodata
17862+#endif
17863+
17864+ . = ALIGN(PAGE_SIZE);
17865+ NOTES :rodata :note
fe2de317 17866+
17867+ EXCEPTION_TABLE(16) :rodata
17868
17869 #if defined(CONFIG_DEBUG_RODATA)
17870 /* .text should occupy whole number of pages */
6e9df6a3 17871@@ -126,16 +180,20 @@ SECTIONS
17872
17873 /* Data */
17874 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17875+
17876+#ifdef CONFIG_PAX_KERNEXEC
bc901d79 17877+ . = ALIGN(HPAGE_SIZE);
58c5fc13 17878+#else
bc901d79 17879+ . = ALIGN(PAGE_SIZE);
17880+#endif
17881+
17882 /* Start of data section */
17883 _sdata = .;
17884
17885 /* init_task */
17886 INIT_TASK_DATA(THREAD_SIZE)
17887
17888-#ifdef CONFIG_X86_32
17889- /* 32 bit has nosave before _edata */
17890 NOSAVE_DATA
17891-#endif
17892
17893 PAGE_ALIGNED_DATA(PAGE_SIZE)
ae4e228f 17894
6e9df6a3 17895@@ -176,12 +234,19 @@ SECTIONS
17896 #endif /* CONFIG_X86_64 */
17897
17898 /* Init code and data - will be freed after init */
17899- . = ALIGN(PAGE_SIZE);
17900 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17901+ BYTE(0)
17902+
17903+#ifdef CONFIG_PAX_KERNEXEC
57199397 17904+ . = ALIGN(HPAGE_SIZE);
17905+#else
17906+ . = ALIGN(PAGE_SIZE);
17907+#endif
17908+
17909 __init_begin = .; /* paired with __init_end */
17910- }
17911+ } :init.begin
17912
17913-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17914+#ifdef CONFIG_SMP
17915 /*
17916 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17917 * output PHDR, so the next output section - .init.text - should
6e9df6a3 17918@@ -190,12 +255,27 @@ SECTIONS
66a7e928 17919 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17920 #endif
17921
ae4e228f 17922- INIT_TEXT_SECTION(PAGE_SIZE)
17923-#ifdef CONFIG_X86_64
17924- :init
17925-#endif
17926+ . = ALIGN(PAGE_SIZE);
17927+ init_begin = .;
17928+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17929+ VMLINUX_SYMBOL(_sinittext) = .;
17930+ INIT_TEXT
17931+ VMLINUX_SYMBOL(_einittext) = .;
17932+ . = ALIGN(PAGE_SIZE);
58c5fc13 17933+ } :text.init
17934
17935- INIT_DATA_SECTION(16)
17936+ /*
17937+ * .exit.text is discard at runtime, not link time, to deal with
17938+ * references from .altinstructions and .eh_frame
17939+ */
57199397 17940+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17941+ EXIT_TEXT
17942+ . = ALIGN(16);
17943+ } :text.exit
17944+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
bc901d79 17945+
17946+ . = ALIGN(PAGE_SIZE);
17947+ INIT_DATA_SECTION(16) :init
58c5fc13 17948
17949 /*
17950 * Code and data for a variety of lowlevel trampolines, to be
6e9df6a3 17951@@ -269,19 +349,12 @@ SECTIONS
58c5fc13 17952 }
66a7e928 17953
bc901d79 17954 . = ALIGN(8);
17955- /*
17956- * .exit.text is discard at runtime, not link time, to deal with
17957- * references from .altinstructions and .eh_frame
17958- */
17959- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17960- EXIT_TEXT
17961- }
bc901d79 17962
17963 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17964 EXIT_DATA
17965 }
17966
17967-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17968+#ifndef CONFIG_SMP
15a11c5b 17969 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
17970 #endif
17971
6e9df6a3 17972@@ -300,16 +373,10 @@ SECTIONS
17973 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
17974 __smp_locks = .;
17975 *(.smp_locks)
17976- . = ALIGN(PAGE_SIZE);
17977 __smp_locks_end = .;
17978+ . = ALIGN(PAGE_SIZE);
17979 }
17980
17981-#ifdef CONFIG_X86_64
17982- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17983- NOSAVE_DATA
17984- }
17985-#endif
17986-
17987 /* BSS */
17988 . = ALIGN(PAGE_SIZE);
17989 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
6e9df6a3 17990@@ -325,6 +392,7 @@ SECTIONS
17991 __brk_base = .;
17992 . += 64 * 1024; /* 64k alignment slop space */
17993 *(.brk_reservation) /* areas brk users have reserved */
57199397 17994+ . = ALIGN(HPAGE_SIZE);
17995 __brk_limit = .;
17996 }
17997
6e9df6a3 17998@@ -351,13 +419,12 @@ SECTIONS
17999 * for the boot processor.
18000 */
df50ba0c 18001 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18002-INIT_PER_CPU(gdt_page);
18003 INIT_PER_CPU(irq_stack_union);
18004
18005 /*
18006 * Build-time check on the image size:
18007 */
18008-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18009+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18010 "kernel image bigger than KERNEL_IMAGE_SIZE");
18011
18012 #ifdef CONFIG_SMP
18013diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18014index b56c65de..561a55b 100644
18015--- a/arch/x86/kernel/vsyscall_64.c
18016+++ b/arch/x86/kernel/vsyscall_64.c
18017@@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
15a11c5b 18018 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18019 };
18020
18021-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18022+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18023
18024 static int __init vsyscall_setup(char *str)
18025 {
18026 if (str) {
18027 if (!strcmp("emulate", str))
18028 vsyscall_mode = EMULATE;
18029- else if (!strcmp("native", str))
18030- vsyscall_mode = NATIVE;
18031 else if (!strcmp("none", str))
18032 vsyscall_mode = NONE;
18033 else
fe2de317 18034@@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18035
18036 tsk = current;
18037 if (seccomp_mode(&tsk->seccomp))
18038- do_exit(SIGKILL);
18039+ do_group_exit(SIGKILL);
18040
18041 switch (vsyscall_nr) {
18042 case 0:
fe2de317 18043@@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18044 return true;
18045
18046 sigsegv:
18047- force_sig(SIGSEGV, current);
18048- return true;
18049+ do_group_exit(SIGKILL);
18050 }
18051
18052 /*
18053@@ -273,10 +270,7 @@ void __init map_vsyscall(void)
18054 extern char __vvar_page;
18055 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18056
18057- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18058- vsyscall_mode == NATIVE
18059- ? PAGE_KERNEL_VSYSCALL
18060- : PAGE_KERNEL_VVAR);
18061+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18062 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18063 (unsigned long)VSYSCALL_START);
18064
18065diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18066index 9796c2f..f686fbf 100644
18067--- a/arch/x86/kernel/x8664_ksyms_64.c
18068+++ b/arch/x86/kernel/x8664_ksyms_64.c
18069@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18070 EXPORT_SYMBOL(copy_user_generic_string);
18071 EXPORT_SYMBOL(copy_user_generic_unrolled);
58c5fc13 18072 EXPORT_SYMBOL(__copy_user_nocache);
18073-EXPORT_SYMBOL(_copy_from_user);
18074-EXPORT_SYMBOL(_copy_to_user);
18075
18076 EXPORT_SYMBOL(copy_page);
ae4e228f 18077 EXPORT_SYMBOL(clear_page);
18078diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18079index a391134..d0b63b6e 100644
18080--- a/arch/x86/kernel/xsave.c
18081+++ b/arch/x86/kernel/xsave.c
18082@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
ae4e228f 18083 fx_sw_user->xstate_size > fx_sw_user->extended_size)
6892158b 18084 return -EINVAL;
18085
18086- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18087+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18088 fx_sw_user->extended_size -
18089 FP_XSTATE_MAGIC2_SIZE));
18090 if (err)
18091@@ -267,7 +267,7 @@ fx_only:
18092 * the other extended state.
18093 */
18094 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18095- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
6e9df6a3 18096+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18097 }
18098
18099 /*
fe2de317 18100@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
57199397 18101 if (use_xsave())
18102 err = restore_user_xstate(buf);
18103 else
18104- err = fxrstor_checking((__force struct i387_fxsave_struct *)
6e9df6a3 18105+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18106 buf);
18107 if (unlikely(err)) {
18108 /*
18109diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18110index 8b4cc5f..f086b5b 100644
18111--- a/arch/x86/kvm/emulate.c
18112+++ b/arch/x86/kvm/emulate.c
15a11c5b 18113@@ -96,7 +96,7 @@
18114 #define Src2ImmByte (2<<29)
18115 #define Src2One (3<<29)
bc901d79 18116 #define Src2Imm (4<<29)
57199397 18117-#define Src2Mask (7<<29)
18118+#define Src2Mask (7U<<29)
18119
18120 #define X2(x...) x, x
18121 #define X3(x...) X2(x), x
15a11c5b 18122@@ -207,6 +207,7 @@ struct gprefix {
ae4e228f 18123
bc901d79 18124 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
18125 do { \
18126+ unsigned long _tmp; \
18127 __asm__ __volatile__ ( \
18128 _PRE_EFLAGS("0", "4", "2") \
18129 _op _suffix " %"_x"3,%1; " \
15a11c5b 18130@@ -220,8 +221,6 @@ struct gprefix {
18131 /* Raw emulation: instruction has two explicit operands. */
18132 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18133 do { \
18134- unsigned long _tmp; \
18135- \
18136 switch ((_dst).bytes) { \
18137 case 2: \
bc901d79 18138 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
15a11c5b 18139@@ -237,7 +236,6 @@ struct gprefix {
18140
18141 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18142 do { \
18143- unsigned long _tmp; \
18144 switch ((_dst).bytes) { \
18145 case 1: \
bc901d79 18146 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
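Aside on the Src2Mask hunk above: (7<<29) shifts set bits into the sign bit of a 32-bit signed int, which is undefined behaviour in C, whereas (7U<<29) keeps the whole expression unsigned and well defined. A small stand-alone illustration (mine, not from the patch):

    #include <stdio.h>

    int main(void)
    {
        /* 7 << 29 does not fit in a 32-bit signed int (undefined behaviour);
         * 7U << 29 is a well-defined unsigned constant. */
        unsigned int mask_ok  = 7U << 29;     /* 0xE0000000 */
        long long    promoted = 7LL << 29;    /* the value the signed form is "meant" to be */

        printf("7U<<29          = 0x%08X\n", mask_ok);
        printf("7<<29 as 64-bit = 0x%llX\n", (unsigned long long)promoted);
        return 0;
    }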
18147diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18148index 57dcbd4..79aba9b 100644
18149--- a/arch/x86/kvm/lapic.c
18150+++ b/arch/x86/kvm/lapic.c
6892158b 18151@@ -53,7 +53,7 @@
18152 #define APIC_BUS_CYCLE_NS 1
18153
18154 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18155-#define apic_debug(fmt, arg...)
18156+#define apic_debug(fmt, arg...) do {} while (0)
18157
18158 #define APIC_LVT_NUM 6
18159 /* 14 is the version for Xeon and Pentium 8.4.8*/
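Aside on the apic_debug change above: an empty macro expansion turns "if (x) apic_debug(...);" into an if with an empty statement body, which modern compilers flag (-Wempty-body) and which breaks in expression contexts; "do {} while (0)" always expands to exactly one well-formed statement. A stand-alone illustration (mine, not from the patch):

    #include <stdio.h>

    #define dbg_empty(fmt, ...)                    /* expands to nothing */
    #define dbg_safe(fmt, ...)  do {} while (0)    /* one real statement */

    int main(void)
    {
        int n = 1;

        if (n)
            dbg_empty("n=%d", n);   /* if-body collapses to a bare ';' -> -Wempty-body */

        if (n)
            dbg_safe("n=%d", n);    /* if-body is a proper do-statement */
        else
            puts("n is zero");

        printf("n = %d\n", n);
        return 0;
    }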
18160diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18161index 8e8da79..13bc641 100644
18162--- a/arch/x86/kvm/mmu.c
18163+++ b/arch/x86/kvm/mmu.c
18164@@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18165
18166 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18167
18168- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18169+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18170
18171 /*
18172 * Assume that the pte write on a page table of the same type
fe2de317 18173@@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
15a11c5b 18174 }
8308f9c9 18175
18176 spin_lock(&vcpu->kvm->mmu_lock);
18177- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18178+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18179 gentry = 0;
8308f9c9 18180 kvm_mmu_free_some_pages(vcpu);
66a7e928 18181 ++vcpu->kvm->stat.mmu_pte_write;
18182diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18183index 507e2b8..fc55f89 100644
18184--- a/arch/x86/kvm/paging_tmpl.h
18185+++ b/arch/x86/kvm/paging_tmpl.h
18186@@ -197,7 +197,7 @@ retry_walk:
18187 if (unlikely(kvm_is_error_hva(host_addr)))
18188 goto error;
18189
18190- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18191+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18192 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18193 goto error;
18194
fe2de317 18195@@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
18196 unsigned long mmu_seq;
18197 bool map_writable;
18198
18199+ pax_track_stack();
18200+
18201 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18202
6e9df6a3 18203 if (unlikely(error_code & PFERR_RSVD_MASK))
fe2de317 18204@@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18205 if (need_flush)
18206 kvm_flush_remote_tlbs(vcpu->kvm);
18207
18208- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18209+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18210
18211 spin_unlock(&vcpu->kvm->mmu_lock);
18212
18213diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18214index 475d1c9..33658ff 100644
18215--- a/arch/x86/kvm/svm.c
18216+++ b/arch/x86/kvm/svm.c
18217@@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18218 int cpu = raw_smp_processor_id();
18219
ae4e228f 18220 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
58c5fc13 18221+
18222+ pax_open_kernel();
18223 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18224+ pax_close_kernel();
18225+
18226 load_TR_desc();
18227 }
18228
fe2de317 18229@@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
66a7e928 18230 #endif
18231 #endif
18232
18233+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18234+ __set_fs(current_thread_info()->addr_limit);
18235+#endif
18236+
18237 reload_tss(vcpu);
18238
18239 local_irq_disable();
18240diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18241index e65a158..656dc24 100644
18242--- a/arch/x86/kvm/vmx.c
18243+++ b/arch/x86/kvm/vmx.c
6e9df6a3 18244@@ -1251,7 +1251,11 @@ static void reload_tss(void)
bc901d79 18245 struct desc_struct *descs;
58c5fc13 18246
bc901d79 18247 descs = (void *)gdt->address;
58c5fc13 18248+
ae4e228f 18249+ pax_open_kernel();
58c5fc13 18250 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
ae4e228f 18251+ pax_close_kernel();
18252+
18253 load_TR_desc();
18254 }
18255
6e9df6a3 18256@@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
18257 if (!cpu_has_vmx_flexpriority())
18258 flexpriority_enabled = 0;
18259
18260- if (!cpu_has_vmx_tpr_shadow())
18261- kvm_x86_ops->update_cr8_intercept = NULL;
18262+ if (!cpu_has_vmx_tpr_shadow()) {
ae4e228f 18263+ pax_open_kernel();
58c5fc13 18264+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
ae4e228f 18265+ pax_close_kernel();
18266+ }
18267
18268 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18269 kvm_disable_largepages();
fe2de317 18270@@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(void)
57199397 18271 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
58c5fc13 18272
18273 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18274- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18275+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18276
18277 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18278 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
fe2de317 18279@@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18280 "jmp .Lkvm_vmx_return \n\t"
18281 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18282 ".Lkvm_vmx_return: "
18283+
18284+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18285+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18286+ ".Lkvm_vmx_return2: "
18287+#endif
18288+
18289 /* Save guest registers, load host registers, keep flags */
18290 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18291 "pop %0 \n\t"
fe2de317 18292@@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
58c5fc13 18293 #endif
18294 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18295 [wordsize]"i"(sizeof(ulong))
18296+
18297+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18298+ ,[cs]"i"(__KERNEL_CS)
18299+#endif
18300+
18301 : "cc", "memory"
bc901d79 18302 , R"ax", R"bx", R"di", R"si"
58c5fc13 18303 #ifdef CONFIG_X86_64
fe2de317 18304@@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
6e9df6a3
MT
18305 }
18306 }
58c5fc13
MT
18307
18308- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
6892158b 18309+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
71d190be
MT
18310+
18311+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
8308f9c9 18312+ loadsegment(fs, __KERNEL_PERCPU);
71d190be
MT
18313+#endif
18314+
18315+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18316+ __set_fs(current_thread_info()->addr_limit);
18317+#endif
18318+
6e9df6a3 18319 vmx->loaded_vmcs->launched = 1;
58c5fc13 18320
bc901d79 18321 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
fe2de317
MT
18322diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18323index 84a28ea..9326501 100644
18324--- a/arch/x86/kvm/x86.c
18325+++ b/arch/x86/kvm/x86.c
18326@@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
6e9df6a3
MT
18327 {
18328 struct kvm *kvm = vcpu->kvm;
18329 int lm = is_long_mode(vcpu);
18330- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18331- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18332+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18333+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18334 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18335 : kvm->arch.xen_hvm_config.blob_size_32;
18336 u32 page_num = data & ~PAGE_MASK;
fe2de317 18337@@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18338 if (n < msr_list.nmsrs)
18339 goto out;
18340 r = -EFAULT;
18341+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18342+ goto out;
18343 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18344 num_msrs_to_save * sizeof(u32)))
18345 goto out;
fe2de317 18346@@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18347 struct kvm_cpuid2 *cpuid,
18348 struct kvm_cpuid_entry2 __user *entries)
18349 {
18350- int r;
18351+ int r, i;
18352
18353 r = -E2BIG;
18354 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18355 goto out;
18356 r = -EFAULT;
18357- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18358- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18359+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18360 goto out;
18361+ for (i = 0; i < cpuid->nent; ++i) {
18362+ struct kvm_cpuid_entry2 cpuid_entry;
18363+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18364+ goto out;
18365+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18366+ }
18367 vcpu->arch.cpuid_nent = cpuid->nent;
18368 kvm_apic_set_version(vcpu);
18369 kvm_x86_ops->cpuid_update(vcpu);
fe2de317 18370@@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18371 struct kvm_cpuid2 *cpuid,
18372 struct kvm_cpuid_entry2 __user *entries)
18373 {
18374- int r;
18375+ int r, i;
18376
18377 r = -E2BIG;
18378 if (cpuid->nent < vcpu->arch.cpuid_nent)
18379 goto out;
18380 r = -EFAULT;
18381- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18382- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18383+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18384 goto out;
18385+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18386+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18387+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18388+ goto out;
18389+ }
18390 return 0;
18391
18392 out:
fe2de317 18393@@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18394 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18395 struct kvm_interrupt *irq)
18396 {
18397- if (irq->irq < 0 || irq->irq >= 256)
18398+ if (irq->irq >= 256)
18399 return -EINVAL;
18400 if (irqchip_in_kernel(vcpu->kvm))
18401 return -ENXIO;
18402@@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
18403 kvm_mmu_set_mmio_spte_mask(mask);
ae4e228f 18404 }
18405
18406-int kvm_arch_init(void *opaque)
18407+int kvm_arch_init(const void *opaque)
18408 {
ae4e228f 18409 int r;
15a11c5b 18410 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
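Note on the kvm_vcpu_ioctl_set_cpuid2()/get_cpuid2() hunks above: instead of one bulk copy_from_user()/copy_to_user() of nent entries, the patched code validates the user range with access_ok() and then moves one kvm_cpuid_entry2 at a time through a stack temporary; the KVM_MAX_CPUID_ENTRIES bound is still checked first. A rough user-space analogue of that bounded, per-element pattern (hypothetical names, illustration only, not the kernel code itself):

    #include <stdio.h>
    #include <string.h>

    #define MAX_ENTRIES 64                      /* stand-in for KVM_MAX_CPUID_ENTRIES */

    struct entry { unsigned function, eax, ebx, ecx, edx; };

    /* Reject oversized requests up front, then copy element by element. */
    static int set_entries(struct entry *dst, const struct entry *src, unsigned nent)
    {
        if (nent > MAX_ENTRIES)
            return -1;                           /* -E2BIG in the kernel code */
        for (unsigned i = 0; i < nent; i++) {
            struct entry tmp;
            memcpy(&tmp, &src[i], sizeof(tmp));  /* __copy_from_user() per entry */
            dst[i] = tmp;
        }
        return 0;
    }

    int main(void)
    {
        struct entry in[2] = { { .function = 0 }, { .function = 1 } };
        struct entry out[MAX_ENTRIES];
        printf("2 entries: %d\n", set_entries(out, in, 2));
        printf("oversized: %d\n", set_entries(out, in, MAX_ENTRIES + 1));
        return 0;
    }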
18411diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18412index 13ee258..b9632f6 100644
18413--- a/arch/x86/lguest/boot.c
18414+++ b/arch/x86/lguest/boot.c
18415@@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18416 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18417 * Launcher to reboot us.
18418 */
18419-static void lguest_restart(char *reason)
18420+static __noreturn void lguest_restart(char *reason)
18421 {
18422 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18423+ BUG();
18424 }
18425
18426 /*G:050
18427diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18428index 042f682..c92afb6 100644
18429--- a/arch/x86/lib/atomic64_32.c
18430+++ b/arch/x86/lib/atomic64_32.c
66a7e928 18431@@ -8,18 +8,30 @@
18432
18433 long long atomic64_read_cx8(long long, const atomic64_t *v);
18434 EXPORT_SYMBOL(atomic64_read_cx8);
18435+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18436+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18437 long long atomic64_set_cx8(long long, const atomic64_t *v);
18438 EXPORT_SYMBOL(atomic64_set_cx8);
18439+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18440+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
8308f9c9 18441 long long atomic64_xchg_cx8(long long, unsigned high);
18442 EXPORT_SYMBOL(atomic64_xchg_cx8);
18443 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18444 EXPORT_SYMBOL(atomic64_add_return_cx8);
18445+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18446+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18447 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
8308f9c9 18448 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18449+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18450+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18451 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18452 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18453+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18454+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18455 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18456 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18457+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18458+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
8308f9c9 18459 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18460 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18461 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18462@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18463 #ifndef CONFIG_X86_CMPXCHG64
18464 long long atomic64_read_386(long long, const atomic64_t *v);
18465 EXPORT_SYMBOL(atomic64_read_386);
18466+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18467+EXPORT_SYMBOL(atomic64_read_unchecked_386);
18468 long long atomic64_set_386(long long, const atomic64_t *v);
18469 EXPORT_SYMBOL(atomic64_set_386);
18470+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18471+EXPORT_SYMBOL(atomic64_set_unchecked_386);
18472 long long atomic64_xchg_386(long long, unsigned high);
18473 EXPORT_SYMBOL(atomic64_xchg_386);
18474 long long atomic64_add_return_386(long long a, atomic64_t *v);
18475 EXPORT_SYMBOL(atomic64_add_return_386);
18476+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18477+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18478 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18479 EXPORT_SYMBOL(atomic64_sub_return_386);
18480+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18481+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18482 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18483 EXPORT_SYMBOL(atomic64_inc_return_386);
18484+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18485+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18486 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18487 EXPORT_SYMBOL(atomic64_dec_return_386);
18488+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18489+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18490 long long atomic64_add_386(long long a, atomic64_t *v);
18491 EXPORT_SYMBOL(atomic64_add_386);
18492+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18493+EXPORT_SYMBOL(atomic64_add_unchecked_386);
18494 long long atomic64_sub_386(long long a, atomic64_t *v);
18495 EXPORT_SYMBOL(atomic64_sub_386);
18496+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18497+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18498 long long atomic64_inc_386(long long a, atomic64_t *v);
18499 EXPORT_SYMBOL(atomic64_inc_386);
18500+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18501+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18502 long long atomic64_dec_386(long long a, atomic64_t *v);
18503 EXPORT_SYMBOL(atomic64_dec_386);
18504+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18505+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18506 long long atomic64_dec_if_positive_386(atomic64_t *v);
18507 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18508 int atomic64_inc_not_zero_386(atomic64_t *v);
18509diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18510index e8e7e0d..56fd1b0 100644
18511--- a/arch/x86/lib/atomic64_386_32.S
18512+++ b/arch/x86/lib/atomic64_386_32.S
18513@@ -48,6 +48,10 @@ BEGIN(read)
18514 movl (v), %eax
18515 movl 4(v), %edx
18516 RET_ENDP
18517+BEGIN(read_unchecked)
18518+ movl (v), %eax
18519+ movl 4(v), %edx
18520+RET_ENDP
18521 #undef v
18522
18523 #define v %esi
18524@@ -55,6 +59,10 @@ BEGIN(set)
18525 movl %ebx, (v)
18526 movl %ecx, 4(v)
18527 RET_ENDP
18528+BEGIN(set_unchecked)
18529+ movl %ebx, (v)
18530+ movl %ecx, 4(v)
18531+RET_ENDP
18532 #undef v
18533
18534 #define v %esi
18535@@ -70,6 +78,20 @@ RET_ENDP
18536 BEGIN(add)
18537 addl %eax, (v)
18538 adcl %edx, 4(v)
18539+
18540+#ifdef CONFIG_PAX_REFCOUNT
18541+ jno 0f
18542+ subl %eax, (v)
18543+ sbbl %edx, 4(v)
18544+ int $4
18545+0:
18546+ _ASM_EXTABLE(0b, 0b)
18547+#endif
18548+
18549+RET_ENDP
18550+BEGIN(add_unchecked)
18551+ addl %eax, (v)
18552+ adcl %edx, 4(v)
18553 RET_ENDP
18554 #undef v
18555
18556@@ -77,6 +99,24 @@ RET_ENDP
18557 BEGIN(add_return)
18558 addl (v), %eax
18559 adcl 4(v), %edx
18560+
18561+#ifdef CONFIG_PAX_REFCOUNT
18562+ into
18563+1234:
18564+ _ASM_EXTABLE(1234b, 2f)
18565+#endif
18566+
18567+ movl %eax, (v)
18568+ movl %edx, 4(v)
18569+
18570+#ifdef CONFIG_PAX_REFCOUNT
18571+2:
18572+#endif
18573+
18574+RET_ENDP
18575+BEGIN(add_return_unchecked)
18576+ addl (v), %eax
18577+ adcl 4(v), %edx
18578 movl %eax, (v)
18579 movl %edx, 4(v)
18580 RET_ENDP
18581@@ -86,6 +126,20 @@ RET_ENDP
18582 BEGIN(sub)
18583 subl %eax, (v)
18584 sbbl %edx, 4(v)
18585+
18586+#ifdef CONFIG_PAX_REFCOUNT
18587+ jno 0f
18588+ addl %eax, (v)
18589+ adcl %edx, 4(v)
18590+ int $4
18591+0:
18592+ _ASM_EXTABLE(0b, 0b)
18593+#endif
18594+
18595+RET_ENDP
18596+BEGIN(sub_unchecked)
18597+ subl %eax, (v)
18598+ sbbl %edx, 4(v)
18599 RET_ENDP
18600 #undef v
18601
18602@@ -96,6 +150,27 @@ BEGIN(sub_return)
18603 sbbl $0, %edx
18604 addl (v), %eax
18605 adcl 4(v), %edx
18606+
18607+#ifdef CONFIG_PAX_REFCOUNT
18608+ into
18609+1234:
18610+ _ASM_EXTABLE(1234b, 2f)
18611+#endif
18612+
18613+ movl %eax, (v)
18614+ movl %edx, 4(v)
18615+
18616+#ifdef CONFIG_PAX_REFCOUNT
18617+2:
18618+#endif
18619+
18620+RET_ENDP
18621+BEGIN(sub_return_unchecked)
18622+ negl %edx
18623+ negl %eax
18624+ sbbl $0, %edx
18625+ addl (v), %eax
18626+ adcl 4(v), %edx
18627 movl %eax, (v)
18628 movl %edx, 4(v)
18629 RET_ENDP
18630@@ -105,6 +180,20 @@ RET_ENDP
18631 BEGIN(inc)
18632 addl $1, (v)
18633 adcl $0, 4(v)
18634+
18635+#ifdef CONFIG_PAX_REFCOUNT
18636+ jno 0f
18637+ subl $1, (v)
18638+ sbbl $0, 4(v)
18639+ int $4
18640+0:
18641+ _ASM_EXTABLE(0b, 0b)
18642+#endif
18643+
18644+RET_ENDP
18645+BEGIN(inc_unchecked)
18646+ addl $1, (v)
18647+ adcl $0, 4(v)
18648 RET_ENDP
18649 #undef v
18650
18651@@ -114,6 +203,26 @@ BEGIN(inc_return)
18652 movl 4(v), %edx
18653 addl $1, %eax
18654 adcl $0, %edx
18655+
18656+#ifdef CONFIG_PAX_REFCOUNT
18657+ into
18658+1234:
18659+ _ASM_EXTABLE(1234b, 2f)
18660+#endif
18661+
18662+ movl %eax, (v)
18663+ movl %edx, 4(v)
18664+
18665+#ifdef CONFIG_PAX_REFCOUNT
18666+2:
18667+#endif
18668+
18669+RET_ENDP
18670+BEGIN(inc_return_unchecked)
18671+ movl (v), %eax
18672+ movl 4(v), %edx
18673+ addl $1, %eax
18674+ adcl $0, %edx
18675 movl %eax, (v)
18676 movl %edx, 4(v)
18677 RET_ENDP
18678@@ -123,6 +232,20 @@ RET_ENDP
18679 BEGIN(dec)
18680 subl $1, (v)
18681 sbbl $0, 4(v)
18682+
18683+#ifdef CONFIG_PAX_REFCOUNT
18684+ jno 0f
18685+ addl $1, (v)
18686+ adcl $0, 4(v)
18687+ int $4
18688+0:
18689+ _ASM_EXTABLE(0b, 0b)
18690+#endif
18691+
18692+RET_ENDP
18693+BEGIN(dec_unchecked)
18694+ subl $1, (v)
18695+ sbbl $0, 4(v)
18696 RET_ENDP
18697 #undef v
18698
18699@@ -132,6 +255,26 @@ BEGIN(dec_return)
18700 movl 4(v), %edx
18701 subl $1, %eax
18702 sbbl $0, %edx
18703+
18704+#ifdef CONFIG_PAX_REFCOUNT
18705+ into
18706+1234:
18707+ _ASM_EXTABLE(1234b, 2f)
18708+#endif
18709+
18710+ movl %eax, (v)
18711+ movl %edx, 4(v)
18712+
18713+#ifdef CONFIG_PAX_REFCOUNT
18714+2:
18715+#endif
18716+
18717+RET_ENDP
18718+BEGIN(dec_return_unchecked)
18719+ movl (v), %eax
18720+ movl 4(v), %edx
18721+ subl $1, %eax
18722+ sbbl $0, %edx
18723 movl %eax, (v)
18724 movl %edx, 4(v)
18725 RET_ENDP
18726@@ -143,6 +286,13 @@ BEGIN(add_unless)
18727 adcl %edx, %edi
18728 addl (v), %eax
18729 adcl 4(v), %edx
18730+
18731+#ifdef CONFIG_PAX_REFCOUNT
18732+ into
18733+1234:
18734+ _ASM_EXTABLE(1234b, 2f)
18735+#endif
18736+
18737 cmpl %eax, %esi
18738 je 3f
18739 1:
18740@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18741 1:
18742 addl $1, %eax
18743 adcl $0, %edx
18744+
18745+#ifdef CONFIG_PAX_REFCOUNT
18746+ into
18747+1234:
18748+ _ASM_EXTABLE(1234b, 2f)
18749+#endif
18750+
18751 movl %eax, (v)
18752 movl %edx, 4(v)
18753 movl $1, %eax
18754@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18755 movl 4(v), %edx
18756 subl $1, %eax
18757 sbbl $0, %edx
18758+
18759+#ifdef CONFIG_PAX_REFCOUNT
18760+ into
18761+1234:
18762+ _ASM_EXTABLE(1234b, 1f)
18763+#endif
18764+
18765 js 1f
18766 movl %eax, (v)
18767 movl %edx, 4(v)
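Note on the atomic64_386_32.S hunks above: under CONFIG_PAX_REFCOUNT each checked operation is followed by a jno/into overflow test that undoes the update and raises interrupt 4 (overflow), which the traps.c hunk earlier in this patch reports via pax_report_refcount_overflow(); the *_unchecked variants keep plain wrap-around semantics for counters that may legitimately overflow. A rough C sketch of the checked-versus-unchecked idea (illustration only, using the GCC/Clang __builtin_add_overflow helper instead of the assembly trap):

    #include <stdio.h>
    #include <limits.h>

    /* Checked add: refuse (and report) an increment that would overflow. */
    static long long add_checked(long long v, long long delta)
    {
        long long out;
        if (__builtin_add_overflow(v, delta, &out)) {
            fprintf(stderr, "refcount overflow detected, keeping old value\n");
            return v;                /* the assembly undoes the add and traps */
        }
        return out;
    }

    /* Unchecked add: plain wrap-around, as the *_unchecked variants keep it. */
    static unsigned long long add_unchecked(unsigned long long v, unsigned long long delta)
    {
        return v + delta;
    }

    int main(void)
    {
        printf("checked:   %lld\n", add_checked(LLONG_MAX, 1));
        printf("unchecked: %llu\n", add_unchecked(~0ULL, 1));
        return 0;
    }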
18768diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18769index 391a083..d658e9f 100644
18770--- a/arch/x86/lib/atomic64_cx8_32.S
18771+++ b/arch/x86/lib/atomic64_cx8_32.S
6e9df6a3 18772@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18773 CFI_STARTPROC
18774
18775 read64 %ecx
6e9df6a3 18776+ pax_force_retaddr
15a11c5b 18777 ret
18778 CFI_ENDPROC
18779 ENDPROC(atomic64_read_cx8)
18780
18781+ENTRY(atomic64_read_unchecked_cx8)
18782+ CFI_STARTPROC
18783+
18784+ read64 %ecx
6e9df6a3 18785+ pax_force_retaddr
18786+ ret
18787+ CFI_ENDPROC
66a7e928 18788+ENDPROC(atomic64_read_unchecked_cx8)
18789+
18790 ENTRY(atomic64_set_cx8)
18791 CFI_STARTPROC
18792
6e9df6a3 18793@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18794 cmpxchg8b (%esi)
18795 jne 1b
18796
6e9df6a3 18797+ pax_force_retaddr
15a11c5b 18798 ret
18799 CFI_ENDPROC
18800 ENDPROC(atomic64_set_cx8)
18801
18802+ENTRY(atomic64_set_unchecked_cx8)
18803+ CFI_STARTPROC
18804+
18805+1:
18806+/* we don't need LOCK_PREFIX since aligned 64-bit writes
18807+ * are atomic on 586 and newer */
18808+ cmpxchg8b (%esi)
18809+ jne 1b
18810+
6e9df6a3 18811+ pax_force_retaddr
18812+ ret
18813+ CFI_ENDPROC
18814+ENDPROC(atomic64_set_unchecked_cx8)
18815+
18816 ENTRY(atomic64_xchg_cx8)
18817 CFI_STARTPROC
18818
6e9df6a3 18819@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18820 cmpxchg8b (%esi)
18821 jne 1b
18822
6e9df6a3 18823+ pax_force_retaddr
15a11c5b 18824 ret
18825 CFI_ENDPROC
18826 ENDPROC(atomic64_xchg_cx8)
18827
18828-.macro addsub_return func ins insc
18829-ENTRY(atomic64_\func\()_return_cx8)
18830+.macro addsub_return func ins insc unchecked=""
18831+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18832 CFI_STARTPROC
18833 SAVE ebp
18834 SAVE ebx
6e9df6a3 18835@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18836 movl %edx, %ecx
18837 \ins\()l %esi, %ebx
18838 \insc\()l %edi, %ecx
18839+
8308f9c9 18840+.ifb \unchecked
18841+#ifdef CONFIG_PAX_REFCOUNT
18842+ into
18843+2:
18844+ _ASM_EXTABLE(2b, 3f)
18845+#endif
8308f9c9 18846+.endif
18847+
18848 LOCK_PREFIX
18849 cmpxchg8b (%ebp)
18850 jne 1b
18851-
18852-10:
18853 movl %ebx, %eax
18854 movl %ecx, %edx
18855+
8308f9c9 18856+.ifb \unchecked
18857+#ifdef CONFIG_PAX_REFCOUNT
18858+3:
18859+#endif
8308f9c9 18860+.endif
18861+
18862 RESTORE edi
18863 RESTORE esi
18864 RESTORE ebx
66a7e928 18865 RESTORE ebp
6e9df6a3 18866+ pax_force_retaddr
18867 ret
18868 CFI_ENDPROC
18869-ENDPROC(atomic64_\func\()_return_cx8)
18870+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18871 .endm
18872
18873 addsub_return add add adc
18874 addsub_return sub sub sbb
18875+addsub_return add add adc _unchecked
66a7e928 18876+addsub_return sub sub sbb _unchecked
18877
18878-.macro incdec_return func ins insc
18879-ENTRY(atomic64_\func\()_return_cx8)
18880+.macro incdec_return func ins insc unchecked
18881+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18882 CFI_STARTPROC
18883 SAVE ebx
18884
6e9df6a3 18885@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18886 movl %edx, %ecx
18887 \ins\()l $1, %ebx
18888 \insc\()l $0, %ecx
18889+
8308f9c9 18890+.ifb \unchecked
18891+#ifdef CONFIG_PAX_REFCOUNT
18892+ into
18893+2:
18894+ _ASM_EXTABLE(2b, 3f)
18895+#endif
8308f9c9 18896+.endif
18897+
18898 LOCK_PREFIX
18899 cmpxchg8b (%esi)
18900 jne 1b
18901
18902-10:
18903 movl %ebx, %eax
18904 movl %ecx, %edx
18905+
8308f9c9 18906+.ifb \unchecked
18907+#ifdef CONFIG_PAX_REFCOUNT
18908+3:
18909+#endif
8308f9c9 18910+.endif
18911+
18912 RESTORE ebx
6e9df6a3 18913+ pax_force_retaddr
18914 ret
18915 CFI_ENDPROC
18916-ENDPROC(atomic64_\func\()_return_cx8)
18917+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18918 .endm
18919
18920 incdec_return inc add adc
18921 incdec_return dec sub sbb
18922+incdec_return inc add adc _unchecked
66a7e928 18923+incdec_return dec sub sbb _unchecked
18924
18925 ENTRY(atomic64_dec_if_positive_cx8)
18926 CFI_STARTPROC
6e9df6a3 18927@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18928 movl %edx, %ecx
18929 subl $1, %ebx
18930 sbb $0, %ecx
18931+
18932+#ifdef CONFIG_PAX_REFCOUNT
18933+ into
18934+1234:
18935+ _ASM_EXTABLE(1234b, 2f)
18936+#endif
18937+
18938 js 2f
18939 LOCK_PREFIX
18940 cmpxchg8b (%esi)
6e9df6a3 18941@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18942 movl %ebx, %eax
18943 movl %ecx, %edx
18944 RESTORE ebx
6e9df6a3 18945+ pax_force_retaddr
18946 ret
18947 CFI_ENDPROC
18948 ENDPROC(atomic64_dec_if_positive_cx8)
6e9df6a3 18949@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18950 movl %edx, %ecx
18951 addl %esi, %ebx
18952 adcl %edi, %ecx
18953+
18954+#ifdef CONFIG_PAX_REFCOUNT
18955+ into
18956+1234:
66a7e928 18957+ _ASM_EXTABLE(1234b, 3f)
18958+#endif
18959+
18960 LOCK_PREFIX
18961 cmpxchg8b (%ebp)
18962 jne 1b
6e9df6a3 18963@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
18964 CFI_ADJUST_CFA_OFFSET -8
18965 RESTORE ebx
18966 RESTORE ebp
6e9df6a3 18967+ pax_force_retaddr
18968 ret
18969 4:
18970 cmpl %edx, 4(%esp)
6e9df6a3 18971@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
18972 movl %edx, %ecx
18973 addl $1, %ebx
18974 adcl $0, %ecx
18975+
18976+#ifdef CONFIG_PAX_REFCOUNT
18977+ into
18978+1234:
66a7e928 18979+ _ASM_EXTABLE(1234b, 3f)
18980+#endif
18981+
18982 LOCK_PREFIX
18983 cmpxchg8b (%esi)
18984 jne 1b
6e9df6a3 18985@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
18986 movl $1, %eax
18987 3:
18988 RESTORE ebx
6e9df6a3 18989+ pax_force_retaddr
18990 ret
18991 4:
18992 testl %edx, %edx
18993diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
18994index 78d16a5..fbcf666 100644
18995--- a/arch/x86/lib/checksum_32.S
18996+++ b/arch/x86/lib/checksum_32.S
18997@@ -28,7 +28,8 @@
18998 #include <linux/linkage.h>
18999 #include <asm/dwarf2.h>
19000 #include <asm/errno.h>
19001-
19002+#include <asm/segment.h>
19003+
19004 /*
19005 * computes a partial checksum, e.g. for TCP/UDP fragments
19006 */
fe2de317 19007@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19008
19009 #define ARGBASE 16
19010 #define FP 12
19011-
19012-ENTRY(csum_partial_copy_generic)
19013+
19014+ENTRY(csum_partial_copy_generic_to_user)
19015 CFI_STARTPROC
19016+
19017+#ifdef CONFIG_PAX_MEMORY_UDEREF
19018+ pushl_cfi %gs
19019+ popl_cfi %es
58c5fc13 19020+ jmp csum_partial_copy_generic
bc901d79 19021+#endif
19022+
19023+ENTRY(csum_partial_copy_generic_from_user)
19024+
19025+#ifdef CONFIG_PAX_MEMORY_UDEREF
19026+ pushl_cfi %gs
19027+ popl_cfi %ds
bc901d79 19028+#endif
19029+
19030+ENTRY(csum_partial_copy_generic)
19031 subl $4,%esp
19032 CFI_ADJUST_CFA_OFFSET 4
19033 pushl_cfi %edi
19034@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19035 jmp 4f
19036 SRC(1: movw (%esi), %bx )
19037 addl $2, %esi
19038-DST( movw %bx, (%edi) )
19039+DST( movw %bx, %es:(%edi) )
19040 addl $2, %edi
19041 addw %bx, %ax
19042 adcl $0, %eax
66a7e928 19043@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19044 SRC(1: movl (%esi), %ebx )
19045 SRC( movl 4(%esi), %edx )
19046 adcl %ebx, %eax
19047-DST( movl %ebx, (%edi) )
19048+DST( movl %ebx, %es:(%edi) )
19049 adcl %edx, %eax
19050-DST( movl %edx, 4(%edi) )
19051+DST( movl %edx, %es:4(%edi) )
19052
19053 SRC( movl 8(%esi), %ebx )
19054 SRC( movl 12(%esi), %edx )
19055 adcl %ebx, %eax
19056-DST( movl %ebx, 8(%edi) )
19057+DST( movl %ebx, %es:8(%edi) )
19058 adcl %edx, %eax
19059-DST( movl %edx, 12(%edi) )
19060+DST( movl %edx, %es:12(%edi) )
19061
19062 SRC( movl 16(%esi), %ebx )
19063 SRC( movl 20(%esi), %edx )
19064 adcl %ebx, %eax
19065-DST( movl %ebx, 16(%edi) )
19066+DST( movl %ebx, %es:16(%edi) )
19067 adcl %edx, %eax
19068-DST( movl %edx, 20(%edi) )
19069+DST( movl %edx, %es:20(%edi) )
19070
19071 SRC( movl 24(%esi), %ebx )
19072 SRC( movl 28(%esi), %edx )
19073 adcl %ebx, %eax
19074-DST( movl %ebx, 24(%edi) )
19075+DST( movl %ebx, %es:24(%edi) )
19076 adcl %edx, %eax
19077-DST( movl %edx, 28(%edi) )
19078+DST( movl %edx, %es:28(%edi) )
19079
19080 lea 32(%esi), %esi
19081 lea 32(%edi), %edi
66a7e928 19082@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19083 shrl $2, %edx # This clears CF
19084 SRC(3: movl (%esi), %ebx )
19085 adcl %ebx, %eax
19086-DST( movl %ebx, (%edi) )
19087+DST( movl %ebx, %es:(%edi) )
19088 lea 4(%esi), %esi
19089 lea 4(%edi), %edi
19090 dec %edx
66a7e928 19091@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19092 jb 5f
19093 SRC( movw (%esi), %cx )
19094 leal 2(%esi), %esi
19095-DST( movw %cx, (%edi) )
19096+DST( movw %cx, %es:(%edi) )
19097 leal 2(%edi), %edi
19098 je 6f
19099 shll $16,%ecx
19100 SRC(5: movb (%esi), %cl )
19101-DST( movb %cl, (%edi) )
19102+DST( movb %cl, %es:(%edi) )
19103 6: addl %ecx, %eax
19104 adcl $0, %eax
19105 7:
66a7e928 19106@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19107
19108 6001:
19109 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19110- movl $-EFAULT, (%ebx)
19111+ movl $-EFAULT, %ss:(%ebx)
19112
19113 # zero the complete destination - computing the rest
19114 # is too much work
66a7e928 19115@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19116
19117 6002:
19118 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19119- movl $-EFAULT,(%ebx)
19120+ movl $-EFAULT,%ss:(%ebx)
19121 jmp 5000b
19122
19123 .previous
19124
19125+ pushl_cfi %ss
19126+ popl_cfi %ds
19127+ pushl_cfi %ss
19128+ popl_cfi %es
19129 popl_cfi %ebx
58c5fc13 19130 CFI_RESTORE ebx
19131 popl_cfi %esi
19132@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19133 popl_cfi %ecx # equivalent to addl $4,%esp
19134 ret
19135 CFI_ENDPROC
19136-ENDPROC(csum_partial_copy_generic)
19137+ENDPROC(csum_partial_copy_generic_to_user)
19138
19139 #else
19140
19141 /* Version for PentiumII/PPro */
19142
19143 #define ROUND1(x) \
19144+ nop; nop; nop; \
19145 SRC(movl x(%esi), %ebx ) ; \
19146 addl %ebx, %eax ; \
19147- DST(movl %ebx, x(%edi) ) ;
19148+ DST(movl %ebx, %es:x(%edi)) ;
19149
19150 #define ROUND(x) \
19151+ nop; nop; nop; \
19152 SRC(movl x(%esi), %ebx ) ; \
19153 adcl %ebx, %eax ; \
19154- DST(movl %ebx, x(%edi) ) ;
19155+ DST(movl %ebx, %es:x(%edi)) ;
19156
19157 #define ARGBASE 12
19158-
19159-ENTRY(csum_partial_copy_generic)
19160+
19161+ENTRY(csum_partial_copy_generic_to_user)
19162 CFI_STARTPROC
19163+
19164+#ifdef CONFIG_PAX_MEMORY_UDEREF
19165+ pushl_cfi %gs
19166+ popl_cfi %es
58c5fc13 19167+ jmp csum_partial_copy_generic
bc901d79 19168+#endif
19169+
19170+ENTRY(csum_partial_copy_generic_from_user)
19171+
19172+#ifdef CONFIG_PAX_MEMORY_UDEREF
19173+ pushl_cfi %gs
19174+ popl_cfi %ds
bc901d79 19175+#endif
19176+
19177+ENTRY(csum_partial_copy_generic)
66a7e928 19178 pushl_cfi %ebx
58c5fc13 19179 CFI_REL_OFFSET ebx, 0
19180 pushl_cfi %edi
19181@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19182 subl %ebx, %edi
19183 lea -1(%esi),%edx
19184 andl $-32,%edx
19185- lea 3f(%ebx,%ebx), %ebx
19186+ lea 3f(%ebx,%ebx,2), %ebx
19187 testl %esi, %esi
19188 jmp *%ebx
19189 1: addl $64,%esi
66a7e928 19190@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19191 jb 5f
19192 SRC( movw (%esi), %dx )
19193 leal 2(%esi), %esi
19194-DST( movw %dx, (%edi) )
19195+DST( movw %dx, %es:(%edi) )
19196 leal 2(%edi), %edi
19197 je 6f
19198 shll $16,%edx
19199 5:
19200 SRC( movb (%esi), %dl )
19201-DST( movb %dl, (%edi) )
19202+DST( movb %dl, %es:(%edi) )
19203 6: addl %edx, %eax
19204 adcl $0, %eax
19205 7:
19206 .section .fixup, "ax"
19207 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19208- movl $-EFAULT, (%ebx)
19209+ movl $-EFAULT, %ss:(%ebx)
19210 # zero the complete destination (computing the rest is too much work)
19211 movl ARGBASE+8(%esp),%edi # dst
19212 movl ARGBASE+12(%esp),%ecx # len
66a7e928 19213@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19214 rep; stosb
19215 jmp 7b
19216 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19217- movl $-EFAULT, (%ebx)
19218+ movl $-EFAULT, %ss:(%ebx)
19219 jmp 7b
19220 .previous
19221
bc901d79 19222+#ifdef CONFIG_PAX_MEMORY_UDEREF
19223+ pushl_cfi %ss
19224+ popl_cfi %ds
19225+ pushl_cfi %ss
19226+ popl_cfi %es
19227+#endif
19228+
66a7e928 19229 popl_cfi %esi
58c5fc13 19230 CFI_RESTORE esi
19231 popl_cfi %edi
19232@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19233 CFI_RESTORE ebx
19234 ret
19235 CFI_ENDPROC
19236-ENDPROC(csum_partial_copy_generic)
19237+ENDPROC(csum_partial_copy_generic_to_user)
19238
19239 #undef ROUND
19240 #undef ROUND1
19241diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19242index f2145cf..cea889d 100644
19243--- a/arch/x86/lib/clear_page_64.S
19244+++ b/arch/x86/lib/clear_page_64.S
6e9df6a3 19245@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19246 movl $4096/8,%ecx
19247 xorl %eax,%eax
19248 rep stosq
6e9df6a3 19249+ pax_force_retaddr
19250 ret
19251 CFI_ENDPROC
19252 ENDPROC(clear_page_c)
6e9df6a3 19253@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19254 movl $4096,%ecx
19255 xorl %eax,%eax
19256 rep stosb
6e9df6a3 19257+ pax_force_retaddr
19258 ret
19259 CFI_ENDPROC
19260 ENDPROC(clear_page_c_e)
6e9df6a3 19261@@ -43,6 +45,7 @@ ENTRY(clear_page)
19262 leaq 64(%rdi),%rdi
19263 jnz .Lloop
19264 nop
6e9df6a3 19265+ pax_force_retaddr
19266 ret
19267 CFI_ENDPROC
19268 .Lclear_page_end:
6e9df6a3 19269@@ -58,7 +61,7 @@ ENDPROC(clear_page)
19270
19271 #include <asm/cpufeature.h>
19272
19273- .section .altinstr_replacement,"ax"
19274+ .section .altinstr_replacement,"a"
19275 1: .byte 0xeb /* jmp <disp8> */
19276 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
15a11c5b 19277 2: .byte 0xeb /* jmp <disp8> */
19278diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19279index 1e572c5..2a162cd 100644
19280--- a/arch/x86/lib/cmpxchg16b_emu.S
19281+++ b/arch/x86/lib/cmpxchg16b_emu.S
19282@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19283
19284 popf
19285 mov $1, %al
19286+ pax_force_retaddr
19287 ret
19288
19289 not_same:
19290 popf
19291 xor %al,%al
19292+ pax_force_retaddr
19293 ret
19294
19295 CFI_ENDPROC
19296diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19297index 01c805b..dccb07f 100644
19298--- a/arch/x86/lib/copy_page_64.S
19299+++ b/arch/x86/lib/copy_page_64.S
6e9df6a3 19300@@ -9,6 +9,7 @@ copy_page_c:
19301 CFI_STARTPROC
19302 movl $4096/8,%ecx
19303 rep movsq
6e9df6a3 19304+ pax_force_retaddr
19305 ret
19306 CFI_ENDPROC
19307 ENDPROC(copy_page_c)
19308@@ -39,7 +40,7 @@ ENTRY(copy_page)
19309 movq 16 (%rsi), %rdx
19310 movq 24 (%rsi), %r8
19311 movq 32 (%rsi), %r9
19312- movq 40 (%rsi), %r10
19313+ movq 40 (%rsi), %r13
19314 movq 48 (%rsi), %r11
19315 movq 56 (%rsi), %r12
19316
19317@@ -50,7 +51,7 @@ ENTRY(copy_page)
19318 movq %rdx, 16 (%rdi)
19319 movq %r8, 24 (%rdi)
19320 movq %r9, 32 (%rdi)
19321- movq %r10, 40 (%rdi)
19322+ movq %r13, 40 (%rdi)
19323 movq %r11, 48 (%rdi)
19324 movq %r12, 56 (%rdi)
19325
19326@@ -69,7 +70,7 @@ ENTRY(copy_page)
19327 movq 16 (%rsi), %rdx
19328 movq 24 (%rsi), %r8
19329 movq 32 (%rsi), %r9
19330- movq 40 (%rsi), %r10
19331+ movq 40 (%rsi), %r13
19332 movq 48 (%rsi), %r11
19333 movq 56 (%rsi), %r12
19334
19335@@ -78,7 +79,7 @@ ENTRY(copy_page)
19336 movq %rdx, 16 (%rdi)
19337 movq %r8, 24 (%rdi)
19338 movq %r9, 32 (%rdi)
19339- movq %r10, 40 (%rdi)
19340+ movq %r13, 40 (%rdi)
19341 movq %r11, 48 (%rdi)
19342 movq %r12, 56 (%rdi)
19343
6e9df6a3 19344@@ -95,6 +96,7 @@ ENTRY(copy_page)
19345 CFI_RESTORE r13
19346 addq $3*8,%rsp
19347 CFI_ADJUST_CFA_OFFSET -3*8
6e9df6a3 19348+ pax_force_retaddr
19349 ret
19350 .Lcopy_page_end:
19351 CFI_ENDPROC
6e9df6a3 19352@@ -105,7 +107,7 @@ ENDPROC(copy_page)
19353
19354 #include <asm/cpufeature.h>
19355
19356- .section .altinstr_replacement,"ax"
19357+ .section .altinstr_replacement,"a"
19358 1: .byte 0xeb /* jmp <disp8> */
19359 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19360 2:
19361diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19362index 0248402..821c786 100644
19363--- a/arch/x86/lib/copy_user_64.S
19364+++ b/arch/x86/lib/copy_user_64.S
15a11c5b 19365@@ -16,6 +16,7 @@
19366 #include <asm/thread_info.h>
19367 #include <asm/cpufeature.h>
15a11c5b 19368 #include <asm/alternative-asm.h>
19369+#include <asm/pgtable.h>
19370
19371 /*
19372 * By placing feature2 after feature1 in altinstructions section, we logically
19373@@ -29,7 +30,7 @@
19374 .byte 0xe9 /* 32bit jump */
19375 .long \orig-1f /* by default jump to orig */
19376 1:
19377- .section .altinstr_replacement,"ax"
19378+ .section .altinstr_replacement,"a"
19379 2: .byte 0xe9 /* near jump with 32bit immediate */
19380 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19381 3: .byte 0xe9 /* near jump with 32bit immediate */
6e9df6a3 19382@@ -71,47 +72,20 @@
19383 #endif
19384 .endm
19385
19386-/* Standard copy_to_user with segment limit checking */
19387-ENTRY(_copy_to_user)
19388- CFI_STARTPROC
19389- GET_THREAD_INFO(%rax)
19390- movq %rdi,%rcx
19391- addq %rdx,%rcx
19392- jc bad_to_user
19393- cmpq TI_addr_limit(%rax),%rcx
19394- ja bad_to_user
19395- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19396- copy_user_generic_unrolled,copy_user_generic_string, \
19397- copy_user_enhanced_fast_string
19398-	CFI_ENDPROC
19399-ENDPROC(_copy_to_user)
19400-
19401-/* Standard copy_from_user with segment limit checking */
19402-ENTRY(_copy_from_user)
19403- CFI_STARTPROC
19404- GET_THREAD_INFO(%rax)
19405- movq %rsi,%rcx
19406- addq %rdx,%rcx
19407- jc bad_from_user
19408- cmpq TI_addr_limit(%rax),%rcx
19409- ja bad_from_user
19410- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19411- copy_user_generic_unrolled,copy_user_generic_string, \
19412- copy_user_enhanced_fast_string
19413-	CFI_ENDPROC
19414-ENDPROC(_copy_from_user)
19415-
19416 .section .fixup,"ax"
19417 /* must zero dest */
19418 ENTRY(bad_from_user)
19419 bad_from_user:
19420 CFI_STARTPROC
19421+ testl %edx,%edx
19422+ js bad_to_user
19423 movl %edx,%ecx
19424 xorl %eax,%eax
19425 rep
19426 stosb
19427 bad_to_user:
19428 movl %edx,%eax
19429+	pax_force_retaddr
19430 ret
19431 CFI_ENDPROC
19432 ENDPROC(bad_from_user)
19433@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19434 jz 17f
19435 1: movq (%rsi),%r8
19436 2: movq 1*8(%rsi),%r9
19437-3: movq 2*8(%rsi),%r10
19438+3: movq 2*8(%rsi),%rax
19439 4: movq 3*8(%rsi),%r11
19440 5: movq %r8,(%rdi)
19441 6: movq %r9,1*8(%rdi)
19442-7: movq %r10,2*8(%rdi)
19443+7: movq %rax,2*8(%rdi)
19444 8: movq %r11,3*8(%rdi)
19445 9: movq 4*8(%rsi),%r8
19446 10: movq 5*8(%rsi),%r9
19447-11: movq 6*8(%rsi),%r10
19448+11: movq 6*8(%rsi),%rax
19449 12: movq 7*8(%rsi),%r11
19450 13: movq %r8,4*8(%rdi)
19451 14: movq %r9,5*8(%rdi)
19452-15: movq %r10,6*8(%rdi)
19453+15: movq %rax,6*8(%rdi)
19454 16: movq %r11,7*8(%rdi)
19455 leaq 64(%rsi),%rsi
19456 leaq 64(%rdi),%rdi
19457@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19458 decl %ecx
19459 jnz 21b
19460 23: xor %eax,%eax
19461+	pax_force_retaddr
19462 ret
19463
19464 .section .fixup,"ax"
19465@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19466 3: rep
19467 movsb
19468 4: xorl %eax,%eax
19469+	pax_force_retaddr
19470 ret
19471
19472 .section .fixup,"ax"
19473@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19474 1: rep
19475 movsb
19476 2: xorl %eax,%eax
19477+	pax_force_retaddr
19478 ret
19479
19480 .section .fixup,"ax"
19481diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19482index cb0c112..e3a6895 100644
19483--- a/arch/x86/lib/copy_user_nocache_64.S
19484+++ b/arch/x86/lib/copy_user_nocache_64.S
19485@@ -8,12 +8,14 @@
19486
19487 #include <linux/linkage.h>
19488 #include <asm/dwarf2.h>
19489+#include <asm/alternative-asm.h>
19490
19491 #define FIX_ALIGNMENT 1
19492
19493 #include <asm/current.h>
19494 #include <asm/asm-offsets.h>
19495 #include <asm/thread_info.h>
19496+#include <asm/pgtable.h>
19497
19498 .macro ALIGN_DESTINATION
19499 #ifdef FIX_ALIGNMENT
19500@@ -50,6 +52,15 @@
19501 */
19502 ENTRY(__copy_user_nocache)
19503 CFI_STARTPROC
19504+
19505+#ifdef CONFIG_PAX_MEMORY_UDEREF
19506+ mov $PAX_USER_SHADOW_BASE,%rcx
19507+ cmp %rcx,%rsi
19508+ jae 1f
19509+ add %rcx,%rsi
19510+1:
19511+#endif
19512+
19513 cmpl $8,%edx
19514 jb 20f /* less then 8 bytes, go to byte copy loop */
19515 ALIGN_DESTINATION
19516@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19517 jz 17f
19518 1: movq (%rsi),%r8
19519 2: movq 1*8(%rsi),%r9
19520-3: movq 2*8(%rsi),%r10
19521+3: movq 2*8(%rsi),%rax
19522 4: movq 3*8(%rsi),%r11
19523 5: movnti %r8,(%rdi)
19524 6: movnti %r9,1*8(%rdi)
19525-7: movnti %r10,2*8(%rdi)
19526+7: movnti %rax,2*8(%rdi)
19527 8: movnti %r11,3*8(%rdi)
19528 9: movq 4*8(%rsi),%r8
19529 10: movq 5*8(%rsi),%r9
19530-11: movq 6*8(%rsi),%r10
19531+11: movq 6*8(%rsi),%rax
19532 12: movq 7*8(%rsi),%r11
19533 13: movnti %r8,4*8(%rdi)
19534 14: movnti %r9,5*8(%rdi)
19535-15: movnti %r10,6*8(%rdi)
19536+15: movnti %rax,6*8(%rdi)
19537 16: movnti %r11,7*8(%rdi)
19538 leaq 64(%rsi),%rsi
19539 leaq 64(%rdi),%rdi
19540@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19541 jnz 21b
19542 23: xorl %eax,%eax
19543 sfence
19544+	pax_force_retaddr
19545 ret
19546
19547 .section .fixup,"ax"
19548diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19549index fb903b7..c92b7f7 100644
19550--- a/arch/x86/lib/csum-copy_64.S
19551+++ b/arch/x86/lib/csum-copy_64.S
19552@@ -8,6 +8,7 @@
19553 #include <linux/linkage.h>
19554 #include <asm/dwarf2.h>
19555 #include <asm/errno.h>
19556+#include <asm/alternative-asm.h>
19557
19558 /*
19559 * Checksum copy with exception handling.
19560@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19561 CFI_RESTORE rbp
19562 addq $7*8, %rsp
19563 CFI_ADJUST_CFA_OFFSET -7*8
19564+	pax_force_retaddr 0, 1
19565 ret
19566 CFI_RESTORE_STATE
19567
19568diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19569index 459b58a..9570bc7 100644
19570--- a/arch/x86/lib/csum-wrappers_64.c
19571+++ b/arch/x86/lib/csum-wrappers_64.c
19572@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19573 len -= 2;
19574 }
19575 }
19576-	isum = csum_partial_copy_generic((__force const void *)src,
19577+
19578+#ifdef CONFIG_PAX_MEMORY_UDEREF
19579+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19580+ src += PAX_USER_SHADOW_BASE;
19581+#endif
19582+
19583+	isum = csum_partial_copy_generic((const void __force_kernel *)src,
19584 dst, len, isum, errp, NULL);
19585 if (unlikely(*errp))
19586 		goto out_err;
19587@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19588 }
19589
19590 *errp = 0;
19591-	return csum_partial_copy_generic(src, (void __force *)dst,
19592+
19593+#ifdef CONFIG_PAX_MEMORY_UDEREF
19594+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19595+ dst += PAX_USER_SHADOW_BASE;
19596+#endif
19597+
19598+	return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19599 len, isum, NULL, errp);
19600 }
19601 EXPORT_SYMBOL(csum_partial_copy_to_user);
19602diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19603index 51f1504..ddac4c1 100644
19604--- a/arch/x86/lib/getuser.S
19605+++ b/arch/x86/lib/getuser.S
19606@@ -33,15 +33,38 @@
19607 #include <asm/asm-offsets.h>
19608 #include <asm/thread_info.h>
19609 #include <asm/asm.h>
19610+#include <asm/segment.h>
19611+#include <asm/pgtable.h>
19612+#include <asm/alternative-asm.h>
19613+
19614+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19615+#define __copyuser_seg gs;
19616+#else
19617+#define __copyuser_seg
19618+#endif
19619
19620 .text
19621 ENTRY(__get_user_1)
19622 	CFI_STARTPROC
19623+
19624+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19625 GET_THREAD_INFO(%_ASM_DX)
19626 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19627 jae bad_get_user
19628-1:	movzb (%_ASM_AX),%edx
19629+
19630+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19631+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19632+ cmp %_ASM_DX,%_ASM_AX
19633+ jae 1234f
19634+ add %_ASM_DX,%_ASM_AX
19635+1234:
19636+#endif
19637+
19638+#endif
19639+
19640+1:	__copyuser_seg movzb (%_ASM_AX),%edx
19641 	xor %eax,%eax
19642+	pax_force_retaddr
19643 ret
19644 CFI_ENDPROC
19645 ENDPROC(__get_user_1)
19646@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19647 ENTRY(__get_user_2)
19648 CFI_STARTPROC
19649 add $1,%_ASM_AX
19650+
19651+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19652 jc bad_get_user
19653 GET_THREAD_INFO(%_ASM_DX)
19654 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19655 jae bad_get_user
19656-2:	movzwl -1(%_ASM_AX),%edx
19657+
19658+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19659+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19660+ cmp %_ASM_DX,%_ASM_AX
19661+ jae 1234f
19662+ add %_ASM_DX,%_ASM_AX
19663+1234:
19664+#endif
19665+
19666+#endif
19667+
19668+2:	__copyuser_seg movzwl -1(%_ASM_AX),%edx
19669 	xor %eax,%eax
19670+	pax_force_retaddr
19671 ret
19672 CFI_ENDPROC
19673 ENDPROC(__get_user_2)
19674@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19675 ENTRY(__get_user_4)
19676 CFI_STARTPROC
19677 add $3,%_ASM_AX
19678+
19679+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19680 jc bad_get_user
19681 GET_THREAD_INFO(%_ASM_DX)
19682 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19683 jae bad_get_user
19684-3:	mov -3(%_ASM_AX),%edx
19685+
19686+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19687+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19688+ cmp %_ASM_DX,%_ASM_AX
19689+ jae 1234f
19690+ add %_ASM_DX,%_ASM_AX
19691+1234:
19692+#endif
19693+
19694+#endif
19695+
19696+3:	__copyuser_seg mov -3(%_ASM_AX),%edx
19697 	xor %eax,%eax
19698+	pax_force_retaddr
19699 ret
19700 CFI_ENDPROC
19701 ENDPROC(__get_user_4)
19702@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19703 GET_THREAD_INFO(%_ASM_DX)
19704 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19705 jae bad_get_user
19706+
19707+#ifdef CONFIG_PAX_MEMORY_UDEREF
19708+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19709+ cmp %_ASM_DX,%_ASM_AX
19710+ jae 1234f
19711+ add %_ASM_DX,%_ASM_AX
19712+1234:
19713+#endif
19714+
19715 4: movq -7(%_ASM_AX),%_ASM_DX
19716 xor %eax,%eax
19717+ pax_force_retaddr
19718 ret
19719 CFI_ENDPROC
19720 ENDPROC(__get_user_8)
19721@@ -91,6 +152,7 @@ bad_get_user:
19722 CFI_STARTPROC
19723 xor %edx,%edx
19724 mov $(-EFAULT),%_ASM_AX
19725+ pax_force_retaddr
19726 	ret
19727 CFI_ENDPROC
19728 END(bad_get_user)
19729diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19730index 9f33b98..dfc7678 100644
19731--- a/arch/x86/lib/insn.c
19732+++ b/arch/x86/lib/insn.c
19733@@ -21,6 +21,11 @@
19734 #include <linux/string.h>
19735 #include <asm/inat.h>
19736 #include <asm/insn.h>
19737+#ifdef __KERNEL__
19738+#include <asm/pgtable_types.h>
19739+#else
19740+#define ktla_ktva(addr) addr
19741+#endif
19742
19743 #define get_next(t, insn) \
19744 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
19745@@ -40,8 +45,8 @@
19746 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19747 {
19748 memset(insn, 0, sizeof(*insn));
19749- insn->kaddr = kaddr;
19750- insn->next_byte = kaddr;
19751+ insn->kaddr = ktla_ktva(kaddr);
19752+ insn->next_byte = ktla_ktva(kaddr);
19753 insn->x86_64 = x86_64 ? 1 : 0;
19754 insn->opnd_bytes = 4;
19755 if (x86_64)
19756diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19757index 05a95e7..326f2fa 100644
19758--- a/arch/x86/lib/iomap_copy_64.S
19759+++ b/arch/x86/lib/iomap_copy_64.S
19760@@ -17,6 +17,7 @@
19761
19762 #include <linux/linkage.h>
19763 #include <asm/dwarf2.h>
19764+#include <asm/alternative-asm.h>
19765
19766 /*
19767 * override generic version in lib/iomap_copy.c
19768@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19769 CFI_STARTPROC
19770 movl %edx,%ecx
19771 rep movsd
19772+	pax_force_retaddr
19773 ret
19774 CFI_ENDPROC
19775 ENDPROC(__iowrite32_copy)
19776diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19777index efbf2a0..8893637 100644
19778--- a/arch/x86/lib/memcpy_64.S
19779+++ b/arch/x86/lib/memcpy_64.S
19780@@ -34,6 +34,7 @@
19781 rep movsq
19782 movl %edx, %ecx
19783 rep movsb
19784+	pax_force_retaddr
19785 ret
19786 .Lmemcpy_e:
19787 .previous
19788@@ -51,6 +52,7 @@
19789
19790 movl %edx, %ecx
19791 rep movsb
19792+	pax_force_retaddr
19793 ret
19794 .Lmemcpy_e_e:
19795 .previous
19796@@ -81,13 +83,13 @@ ENTRY(memcpy)
19797 */
19798 movq 0*8(%rsi), %r8
19799 movq 1*8(%rsi), %r9
19800- movq 2*8(%rsi), %r10
19801+ movq 2*8(%rsi), %rcx
19802 movq 3*8(%rsi), %r11
19803 leaq 4*8(%rsi), %rsi
19804
19805 movq %r8, 0*8(%rdi)
19806 	movq %r9, 1*8(%rdi)
19807- movq %r10, 2*8(%rdi)
19808+ movq %rcx, 2*8(%rdi)
19809 movq %r11, 3*8(%rdi)
19810 leaq 4*8(%rdi), %rdi
19811 jae .Lcopy_forward_loop
19812@@ -110,12 +112,12 @@ ENTRY(memcpy)
19813 subq $0x20, %rdx
19814 movq -1*8(%rsi), %r8
19815 movq -2*8(%rsi), %r9
19816- movq -3*8(%rsi), %r10
19817+ movq -3*8(%rsi), %rcx
19818 movq -4*8(%rsi), %r11
19819 leaq -4*8(%rsi), %rsi
19820 movq %r8, -1*8(%rdi)
19821 movq %r9, -2*8(%rdi)
19822- movq %r10, -3*8(%rdi)
19823+ movq %rcx, -3*8(%rdi)
19824 movq %r11, -4*8(%rdi)
19825 leaq -4*8(%rdi), %rdi
19826 jae .Lcopy_backward_loop
19827@@ -135,12 +137,13 @@ ENTRY(memcpy)
19828 */
19829 movq 0*8(%rsi), %r8
19830 movq 1*8(%rsi), %r9
19831- movq -2*8(%rsi, %rdx), %r10
19832+ movq -2*8(%rsi, %rdx), %rcx
19833 movq -1*8(%rsi, %rdx), %r11
19834 movq %r8, 0*8(%rdi)
19835 movq %r9, 1*8(%rdi)
19836- movq %r10, -2*8(%rdi, %rdx)
19837+ movq %rcx, -2*8(%rdi, %rdx)
19838 	movq %r11, -1*8(%rdi, %rdx)
19839+	pax_force_retaddr
19840 retq
19841 .p2align 4
19842 .Lless_16bytes:
19843@@ -153,6 +156,7 @@ ENTRY(memcpy)
19844 movq -1*8(%rsi, %rdx), %r9
19845 movq %r8, 0*8(%rdi)
19846 movq %r9, -1*8(%rdi, %rdx)
19847+	pax_force_retaddr
19848 retq
19849 .p2align 4
19850 .Lless_8bytes:
19851@@ -166,6 +170,7 @@ ENTRY(memcpy)
19852 movl -4(%rsi, %rdx), %r8d
19853 movl %ecx, (%rdi)
19854 movl %r8d, -4(%rdi, %rdx)
19855+	pax_force_retaddr
19856 retq
19857 .p2align 4
19858 .Lless_3bytes:
19859@@ -183,6 +188,7 @@ ENTRY(memcpy)
19860 jnz .Lloop_1
19861
19862 .Lend:
19863+	pax_force_retaddr
19864 retq
19865 CFI_ENDPROC
19866 ENDPROC(memcpy)
19867diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19868index ee16461..c39c199 100644
19869--- a/arch/x86/lib/memmove_64.S
19870+++ b/arch/x86/lib/memmove_64.S
19871@@ -61,13 +61,13 @@ ENTRY(memmove)
19872 5:
19873 sub $0x20, %rdx
19874 movq 0*8(%rsi), %r11
19875- movq 1*8(%rsi), %r10
19876+ movq 1*8(%rsi), %rcx
19877 movq 2*8(%rsi), %r9
19878 movq 3*8(%rsi), %r8
19879 leaq 4*8(%rsi), %rsi
19880
19881 movq %r11, 0*8(%rdi)
19882- movq %r10, 1*8(%rdi)
19883+ movq %rcx, 1*8(%rdi)
19884 movq %r9, 2*8(%rdi)
19885 movq %r8, 3*8(%rdi)
19886 leaq 4*8(%rdi), %rdi
19887@@ -81,10 +81,10 @@ ENTRY(memmove)
19888 4:
19889 movq %rdx, %rcx
19890 movq -8(%rsi, %rdx), %r11
19891- lea -8(%rdi, %rdx), %r10
19892+ lea -8(%rdi, %rdx), %r9
19893 shrq $3, %rcx
19894 rep movsq
19895- movq %r11, (%r10)
19896+ movq %r11, (%r9)
19897 jmp 13f
19898 .Lmemmove_end_forward:
19899
19900@@ -95,14 +95,14 @@ ENTRY(memmove)
19901 7:
19902 movq %rdx, %rcx
19903 movq (%rsi), %r11
19904- movq %rdi, %r10
19905+ movq %rdi, %r9
19906 leaq -8(%rsi, %rdx), %rsi
19907 leaq -8(%rdi, %rdx), %rdi
19908 shrq $3, %rcx
19909 std
19910 rep movsq
19911 cld
19912- movq %r11, (%r10)
19913+ movq %r11, (%r9)
19914 jmp 13f
19915
19916 /*
19917@@ -127,13 +127,13 @@ ENTRY(memmove)
19918 8:
19919 subq $0x20, %rdx
19920 movq -1*8(%rsi), %r11
19921- movq -2*8(%rsi), %r10
19922+ movq -2*8(%rsi), %rcx
19923 movq -3*8(%rsi), %r9
19924 movq -4*8(%rsi), %r8
19925 leaq -4*8(%rsi), %rsi
19926
19927 movq %r11, -1*8(%rdi)
19928- movq %r10, -2*8(%rdi)
19929+ movq %rcx, -2*8(%rdi)
19930 movq %r9, -3*8(%rdi)
19931 movq %r8, -4*8(%rdi)
19932 leaq -4*8(%rdi), %rdi
19933@@ -151,11 +151,11 @@ ENTRY(memmove)
19934 * Move data from 16 bytes to 31 bytes.
19935 */
19936 movq 0*8(%rsi), %r11
19937- movq 1*8(%rsi), %r10
19938+ movq 1*8(%rsi), %rcx
19939 movq -2*8(%rsi, %rdx), %r9
19940 movq -1*8(%rsi, %rdx), %r8
19941 movq %r11, 0*8(%rdi)
19942- movq %r10, 1*8(%rdi)
19943+ movq %rcx, 1*8(%rdi)
19944 movq %r9, -2*8(%rdi, %rdx)
19945 movq %r8, -1*8(%rdi, %rdx)
19946 jmp 13f
19947@@ -167,9 +167,9 @@ ENTRY(memmove)
19948 * Move data from 8 bytes to 15 bytes.
19949 */
19950 movq 0*8(%rsi), %r11
19951- movq -1*8(%rsi, %rdx), %r10
19952+ movq -1*8(%rsi, %rdx), %r9
19953 movq %r11, 0*8(%rdi)
19954- movq %r10, -1*8(%rdi, %rdx)
19955+ movq %r9, -1*8(%rdi, %rdx)
19956 jmp 13f
19957 10:
19958 cmpq $4, %rdx
19959@@ -178,9 +178,9 @@ ENTRY(memmove)
19960 * Move data from 4 bytes to 7 bytes.
19961 */
19962 movl (%rsi), %r11d
19963- movl -4(%rsi, %rdx), %r10d
19964+ movl -4(%rsi, %rdx), %r9d
19965 movl %r11d, (%rdi)
19966- movl %r10d, -4(%rdi, %rdx)
19967+ movl %r9d, -4(%rdi, %rdx)
19968 jmp 13f
19969 11:
19970 cmp $2, %rdx
19971@@ -189,9 +189,9 @@ ENTRY(memmove)
19972 * Move data from 2 bytes to 3 bytes.
19973 */
19974 movw (%rsi), %r11w
19975- movw -2(%rsi, %rdx), %r10w
19976+ movw -2(%rsi, %rdx), %r9w
19977 movw %r11w, (%rdi)
19978- movw %r10w, -2(%rdi, %rdx)
19979+ movw %r9w, -2(%rdi, %rdx)
19980 jmp 13f
19981 12:
19982 cmp $1, %rdx
19983@@ -202,6 +202,7 @@ ENTRY(memmove)
19984 movb (%rsi), %r11b
19985 movb %r11b, (%rdi)
19986 13:
19987+	pax_force_retaddr
19988 retq
19989 CFI_ENDPROC
19990
19991@@ -210,6 +211,7 @@ ENTRY(memmove)
19992 /* Forward moving data. */
19993 movq %rdx, %rcx
19994 rep movsb
19995+	pax_force_retaddr
19996 retq
19997 .Lmemmove_end_forward_efs:
19998 .previous
19999diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20000index 79bd454..dff325a 100644
20001--- a/arch/x86/lib/memset_64.S
20002+++ b/arch/x86/lib/memset_64.S
20003@@ -31,6 +31,7 @@
20004 movl %r8d,%ecx
20005 rep stosb
20006 movq %r9,%rax
20007+	pax_force_retaddr
20008 ret
20009 .Lmemset_e:
20010 .previous
20011@@ -53,6 +54,7 @@
20012 movl %edx,%ecx
20013 rep stosb
20014 movq %r9,%rax
20015+	pax_force_retaddr
20016 ret
20017 .Lmemset_e_e:
20018 .previous
20019@@ -60,13 +62,13 @@
20020 ENTRY(memset)
20021 ENTRY(__memset)
20022 CFI_STARTPROC
20023- movq %rdi,%r10
20024 movq %rdx,%r11
20025
20026 /* expand byte value */
20027 movzbl %sil,%ecx
20028 movabs $0x0101010101010101,%rax
20029 mul %rcx /* with rax, clobbers rdx */
20030+ movq %rdi,%rdx
20031
20032 /* align dst */
20033 movl %edi,%r9d
20034@@ -120,7 +122,8 @@ ENTRY(__memset)
20035 jnz .Lloop_1
20036
20037 .Lende:
20038- movq %r10,%rax
20039+ movq %rdx,%rax
20040+	pax_force_retaddr
20041 ret
20042
20043 CFI_RESTORE_STATE
20044diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20045index c9f2d9b..e7fd2c0 100644
20046--- a/arch/x86/lib/mmx_32.c
20047+++ b/arch/x86/lib/mmx_32.c
20048@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20049 {
20050 void *p;
20051 int i;
20052+ unsigned long cr0;
20053
20054 if (unlikely(in_interrupt()))
20055 return __memcpy(to, from, len);
20056@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20057 kernel_fpu_begin();
20058
20059 __asm__ __volatile__ (
20060- "1: prefetch (%0)\n" /* This set is 28 bytes */
20061- " prefetch 64(%0)\n"
20062- " prefetch 128(%0)\n"
20063- " prefetch 192(%0)\n"
20064- " prefetch 256(%0)\n"
20065+ "1: prefetch (%1)\n" /* This set is 28 bytes */
20066+ " prefetch 64(%1)\n"
20067+ " prefetch 128(%1)\n"
20068+ " prefetch 192(%1)\n"
20069+ " prefetch 256(%1)\n"
20070 "2: \n"
20071 ".section .fixup, \"ax\"\n"
20072- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20073+ "3: \n"
20074+
20075+#ifdef CONFIG_PAX_KERNEXEC
20076+ " movl %%cr0, %0\n"
20077+ " movl %0, %%eax\n"
20078+ " andl $0xFFFEFFFF, %%eax\n"
20079+ " movl %%eax, %%cr0\n"
20080+#endif
20081+
20082+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20083+
20084+#ifdef CONFIG_PAX_KERNEXEC
20085+ " movl %0, %%cr0\n"
20086+#endif
20087+
20088 " jmp 2b\n"
20089 ".previous\n"
20090 _ASM_EXTABLE(1b, 3b)
20091- : : "r" (from));
20092+ : "=&r" (cr0) : "r" (from) : "ax");
20093
20094 for ( ; i > 5; i--) {
20095 __asm__ __volatile__ (
20096- "1: prefetch 320(%0)\n"
20097- "2: movq (%0), %%mm0\n"
20098- " movq 8(%0), %%mm1\n"
20099- " movq 16(%0), %%mm2\n"
20100- " movq 24(%0), %%mm3\n"
20101- " movq %%mm0, (%1)\n"
20102- " movq %%mm1, 8(%1)\n"
20103- " movq %%mm2, 16(%1)\n"
20104- " movq %%mm3, 24(%1)\n"
20105- " movq 32(%0), %%mm0\n"
20106- " movq 40(%0), %%mm1\n"
20107- " movq 48(%0), %%mm2\n"
20108- " movq 56(%0), %%mm3\n"
20109- " movq %%mm0, 32(%1)\n"
20110- " movq %%mm1, 40(%1)\n"
20111- " movq %%mm2, 48(%1)\n"
20112- " movq %%mm3, 56(%1)\n"
20113+ "1: prefetch 320(%1)\n"
20114+ "2: movq (%1), %%mm0\n"
20115+ " movq 8(%1), %%mm1\n"
20116+ " movq 16(%1), %%mm2\n"
20117+ " movq 24(%1), %%mm3\n"
20118+ " movq %%mm0, (%2)\n"
20119+ " movq %%mm1, 8(%2)\n"
20120+ " movq %%mm2, 16(%2)\n"
20121+ " movq %%mm3, 24(%2)\n"
20122+ " movq 32(%1), %%mm0\n"
20123+ " movq 40(%1), %%mm1\n"
20124+ " movq 48(%1), %%mm2\n"
20125+ " movq 56(%1), %%mm3\n"
20126+ " movq %%mm0, 32(%2)\n"
20127+ " movq %%mm1, 40(%2)\n"
20128+ " movq %%mm2, 48(%2)\n"
20129+ " movq %%mm3, 56(%2)\n"
20130 ".section .fixup, \"ax\"\n"
20131- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20132+ "3:\n"
20133+
20134+#ifdef CONFIG_PAX_KERNEXEC
20135+ " movl %%cr0, %0\n"
20136+ " movl %0, %%eax\n"
20137+ " andl $0xFFFEFFFF, %%eax\n"
20138+ " movl %%eax, %%cr0\n"
20139+#endif
20140+
20141+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20142+
20143+#ifdef CONFIG_PAX_KERNEXEC
20144+ " movl %0, %%cr0\n"
20145+#endif
20146+
20147 " jmp 2b\n"
20148 ".previous\n"
20149 _ASM_EXTABLE(1b, 3b)
20150- : : "r" (from), "r" (to) : "memory");
20151+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20152
20153 from += 64;
20154 to += 64;
20155@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20156 static void fast_copy_page(void *to, void *from)
20157 {
20158 int i;
20159+ unsigned long cr0;
20160
20161 kernel_fpu_begin();
20162
20163@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20164 * but that is for later. -AV
20165 */
20166 __asm__ __volatile__(
20167- "1: prefetch (%0)\n"
20168- " prefetch 64(%0)\n"
20169- " prefetch 128(%0)\n"
20170- " prefetch 192(%0)\n"
20171- " prefetch 256(%0)\n"
20172+ "1: prefetch (%1)\n"
20173+ " prefetch 64(%1)\n"
20174+ " prefetch 128(%1)\n"
20175+ " prefetch 192(%1)\n"
20176+ " prefetch 256(%1)\n"
20177 "2: \n"
20178 ".section .fixup, \"ax\"\n"
20179- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20180+ "3: \n"
20181+
20182+#ifdef CONFIG_PAX_KERNEXEC
20183+ " movl %%cr0, %0\n"
20184+ " movl %0, %%eax\n"
20185+ " andl $0xFFFEFFFF, %%eax\n"
20186+ " movl %%eax, %%cr0\n"
20187+#endif
20188+
20189+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20190+
20191+#ifdef CONFIG_PAX_KERNEXEC
20192+ " movl %0, %%cr0\n"
20193+#endif
20194+
20195 " jmp 2b\n"
20196 ".previous\n"
20197- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20198+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20199
20200 for (i = 0; i < (4096-320)/64; i++) {
20201 __asm__ __volatile__ (
20202- "1: prefetch 320(%0)\n"
20203- "2: movq (%0), %%mm0\n"
20204- " movntq %%mm0, (%1)\n"
20205- " movq 8(%0), %%mm1\n"
20206- " movntq %%mm1, 8(%1)\n"
20207- " movq 16(%0), %%mm2\n"
20208- " movntq %%mm2, 16(%1)\n"
20209- " movq 24(%0), %%mm3\n"
20210- " movntq %%mm3, 24(%1)\n"
20211- " movq 32(%0), %%mm4\n"
20212- " movntq %%mm4, 32(%1)\n"
20213- " movq 40(%0), %%mm5\n"
20214- " movntq %%mm5, 40(%1)\n"
20215- " movq 48(%0), %%mm6\n"
20216- " movntq %%mm6, 48(%1)\n"
20217- " movq 56(%0), %%mm7\n"
20218- " movntq %%mm7, 56(%1)\n"
20219+ "1: prefetch 320(%1)\n"
20220+ "2: movq (%1), %%mm0\n"
20221+ " movntq %%mm0, (%2)\n"
20222+ " movq 8(%1), %%mm1\n"
20223+ " movntq %%mm1, 8(%2)\n"
20224+ " movq 16(%1), %%mm2\n"
20225+ " movntq %%mm2, 16(%2)\n"
20226+ " movq 24(%1), %%mm3\n"
20227+ " movntq %%mm3, 24(%2)\n"
20228+ " movq 32(%1), %%mm4\n"
20229+ " movntq %%mm4, 32(%2)\n"
20230+ " movq 40(%1), %%mm5\n"
20231+ " movntq %%mm5, 40(%2)\n"
20232+ " movq 48(%1), %%mm6\n"
20233+ " movntq %%mm6, 48(%2)\n"
20234+ " movq 56(%1), %%mm7\n"
20235+ " movntq %%mm7, 56(%2)\n"
20236 ".section .fixup, \"ax\"\n"
20237- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20238+ "3:\n"
20239+
20240+#ifdef CONFIG_PAX_KERNEXEC
20241+ " movl %%cr0, %0\n"
20242+ " movl %0, %%eax\n"
20243+ " andl $0xFFFEFFFF, %%eax\n"
20244+ " movl %%eax, %%cr0\n"
20245+#endif
20246+
20247+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20248+
20249+#ifdef CONFIG_PAX_KERNEXEC
20250+ " movl %0, %%cr0\n"
20251+#endif
20252+
20253 " jmp 2b\n"
20254 ".previous\n"
20255- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20256+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20257
20258 from += 64;
20259 to += 64;
20260@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20261 static void fast_copy_page(void *to, void *from)
20262 {
20263 int i;
20264+ unsigned long cr0;
20265
20266 kernel_fpu_begin();
20267
20268 __asm__ __volatile__ (
20269- "1: prefetch (%0)\n"
20270- " prefetch 64(%0)\n"
20271- " prefetch 128(%0)\n"
20272- " prefetch 192(%0)\n"
20273- " prefetch 256(%0)\n"
20274+ "1: prefetch (%1)\n"
20275+ " prefetch 64(%1)\n"
20276+ " prefetch 128(%1)\n"
20277+ " prefetch 192(%1)\n"
20278+ " prefetch 256(%1)\n"
20279 "2: \n"
20280 ".section .fixup, \"ax\"\n"
20281- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20282+ "3: \n"
20283+
20284+#ifdef CONFIG_PAX_KERNEXEC
20285+ " movl %%cr0, %0\n"
20286+ " movl %0, %%eax\n"
20287+ " andl $0xFFFEFFFF, %%eax\n"
20288+ " movl %%eax, %%cr0\n"
20289+#endif
20290+
20291+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20292+
20293+#ifdef CONFIG_PAX_KERNEXEC
20294+ " movl %0, %%cr0\n"
20295+#endif
20296+
20297 " jmp 2b\n"
20298 ".previous\n"
20299- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20300+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20301
20302 for (i = 0; i < 4096/64; i++) {
20303 __asm__ __volatile__ (
20304- "1: prefetch 320(%0)\n"
20305- "2: movq (%0), %%mm0\n"
20306- " movq 8(%0), %%mm1\n"
20307- " movq 16(%0), %%mm2\n"
20308- " movq 24(%0), %%mm3\n"
20309- " movq %%mm0, (%1)\n"
20310- " movq %%mm1, 8(%1)\n"
20311- " movq %%mm2, 16(%1)\n"
20312- " movq %%mm3, 24(%1)\n"
20313- " movq 32(%0), %%mm0\n"
20314- " movq 40(%0), %%mm1\n"
20315- " movq 48(%0), %%mm2\n"
20316- " movq 56(%0), %%mm3\n"
20317- " movq %%mm0, 32(%1)\n"
20318- " movq %%mm1, 40(%1)\n"
20319- " movq %%mm2, 48(%1)\n"
20320- " movq %%mm3, 56(%1)\n"
20321+ "1: prefetch 320(%1)\n"
20322+ "2: movq (%1), %%mm0\n"
20323+ " movq 8(%1), %%mm1\n"
20324+ " movq 16(%1), %%mm2\n"
20325+ " movq 24(%1), %%mm3\n"
20326+ " movq %%mm0, (%2)\n"
20327+ " movq %%mm1, 8(%2)\n"
20328+ " movq %%mm2, 16(%2)\n"
20329+ " movq %%mm3, 24(%2)\n"
20330+ " movq 32(%1), %%mm0\n"
20331+ " movq 40(%1), %%mm1\n"
20332+ " movq 48(%1), %%mm2\n"
20333+ " movq 56(%1), %%mm3\n"
20334+ " movq %%mm0, 32(%2)\n"
20335+ " movq %%mm1, 40(%2)\n"
20336+ " movq %%mm2, 48(%2)\n"
20337+ " movq %%mm3, 56(%2)\n"
20338 ".section .fixup, \"ax\"\n"
20339- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20340+ "3:\n"
20341+
20342+#ifdef CONFIG_PAX_KERNEXEC
20343+ " movl %%cr0, %0\n"
20344+ " movl %0, %%eax\n"
20345+ " andl $0xFFFEFFFF, %%eax\n"
20346+ " movl %%eax, %%cr0\n"
20347+#endif
20348+
20349+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20350+
20351+#ifdef CONFIG_PAX_KERNEXEC
20352+ " movl %0, %%cr0\n"
20353+#endif
20354+
20355 " jmp 2b\n"
20356 ".previous\n"
20357 _ASM_EXTABLE(1b, 3b)
20358- : : "r" (from), "r" (to) : "memory");
20359+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20360
20361 from += 64;
20362 to += 64;
20363diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20364index 69fa106..adda88b 100644
20365--- a/arch/x86/lib/msr-reg.S
20366+++ b/arch/x86/lib/msr-reg.S
20367@@ -3,6 +3,7 @@
20368 #include <asm/dwarf2.h>
20369 #include <asm/asm.h>
20370 #include <asm/msr.h>
20371+#include <asm/alternative-asm.h>
20372
20373 #ifdef CONFIG_X86_64
20374 /*
20375@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20376 CFI_STARTPROC
20377 pushq_cfi %rbx
20378 pushq_cfi %rbp
20379- movq %rdi, %r10 /* Save pointer */
20380+ movq %rdi, %r9 /* Save pointer */
20381 xorl %r11d, %r11d /* Return value */
20382 movl (%rdi), %eax
20383 movl 4(%rdi), %ecx
20384@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20385 movl 28(%rdi), %edi
20386 CFI_REMEMBER_STATE
20387 1: \op
20388-2: movl %eax, (%r10)
20389+2: movl %eax, (%r9)
20390 movl %r11d, %eax /* Return value */
20391- movl %ecx, 4(%r10)
20392- movl %edx, 8(%r10)
20393- movl %ebx, 12(%r10)
20394- movl %ebp, 20(%r10)
20395- movl %esi, 24(%r10)
20396- movl %edi, 28(%r10)
20397+ movl %ecx, 4(%r9)
20398+ movl %edx, 8(%r9)
20399+ movl %ebx, 12(%r9)
20400+ movl %ebp, 20(%r9)
20401+ movl %esi, 24(%r9)
20402+ movl %edi, 28(%r9)
20403 popq_cfi %rbp
20404 popq_cfi %rbx
20405+ pax_force_retaddr
20406 ret
20407 3:
20408 CFI_RESTORE_STATE
20409diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20410index 36b0d15..d381858 100644
20411--- a/arch/x86/lib/putuser.S
20412+++ b/arch/x86/lib/putuser.S
20413@@ -15,7 +15,9 @@
20414 #include <asm/thread_info.h>
20415 #include <asm/errno.h>
20416 #include <asm/asm.h>
20417-
20418+#include <asm/segment.h>
20419+#include <asm/pgtable.h>
20420+#include <asm/alternative-asm.h>
20421
20422 /*
20423  * __put_user_X
20424@@ -29,52 +31,119 @@
20425 * as they get called from within inline assembly.
20426 */
20427
20428-#define ENTER CFI_STARTPROC ; \
20429- GET_THREAD_INFO(%_ASM_BX)
20430-#define EXIT ret ; \
20431+#define ENTER CFI_STARTPROC
20432+#define EXIT pax_force_retaddr; ret ; \
20433 CFI_ENDPROC
20434
20435+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20436+#define _DEST %_ASM_CX,%_ASM_BX
20437+#else
20438+#define _DEST %_ASM_CX
20439+#endif
20440+
20441+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20442+#define __copyuser_seg gs;
20443+#else
20444+#define __copyuser_seg
20445+#endif
20446+
20447 .text
20448 ENTRY(__put_user_1)
20449 	ENTER
20450+
20451+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20452+ GET_THREAD_INFO(%_ASM_BX)
20453 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20454 jae bad_put_user
20455-1:	movb %al,(%_ASM_CX)
20456+
20457+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20458+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20459+ cmp %_ASM_BX,%_ASM_CX
20460+ jb 1234f
20461+ xor %ebx,%ebx
20462+1234:
20463+#endif
20464+
20465+#endif
20466+
20467+1:	__copyuser_seg movb %al,(_DEST)
20468 xor %eax,%eax
20469 EXIT
20470 ENDPROC(__put_user_1)
20471
20472 ENTRY(__put_user_2)
20473 ENTER
20474+
20475+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20476+ GET_THREAD_INFO(%_ASM_BX)
20477 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20478 sub $1,%_ASM_BX
20479 cmp %_ASM_BX,%_ASM_CX
20480 jae bad_put_user
20481-2:	movw %ax,(%_ASM_CX)
20482+
20483+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20484+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20485+ cmp %_ASM_BX,%_ASM_CX
20486+ jb 1234f
20487+ xor %ebx,%ebx
20488+1234:
20489+#endif
20490+
20491+#endif
20492+
20493+2:	__copyuser_seg movw %ax,(_DEST)
20494 xor %eax,%eax
20495 EXIT
20496 ENDPROC(__put_user_2)
20497
20498 ENTRY(__put_user_4)
20499 ENTER
20500+
20501+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20502+ GET_THREAD_INFO(%_ASM_BX)
20503 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20504 sub $3,%_ASM_BX
20505 cmp %_ASM_BX,%_ASM_CX
20506 jae bad_put_user
20507-3:	movl %eax,(%_ASM_CX)
20508+
20509+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20510+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20511+ cmp %_ASM_BX,%_ASM_CX
20512+ jb 1234f
20513+ xor %ebx,%ebx
20514+1234:
20515+#endif
20516+
20517+#endif
20518+
20519+3:	__copyuser_seg movl %eax,(_DEST)
20520 xor %eax,%eax
20521 EXIT
20522 ENDPROC(__put_user_4)
20523
20524 ENTRY(__put_user_8)
20525 ENTER
20526+
20527+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20528+ GET_THREAD_INFO(%_ASM_BX)
20529 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20530 sub $7,%_ASM_BX
20531 cmp %_ASM_BX,%_ASM_CX
20532 jae bad_put_user
20533-4:	mov %_ASM_AX,(%_ASM_CX)
20534+
20535+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20536+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20537+ cmp %_ASM_BX,%_ASM_CX
20538+ jb 1234f
20539+ xor %ebx,%ebx
20540+1234:
20541+#endif
20542+
20543+#endif
20544+
20545+4:	__copyuser_seg mov %_ASM_AX,(_DEST)
20546 #ifdef CONFIG_X86_32
20547-5:	movl %edx,4(%_ASM_CX)
20548+5:	__copyuser_seg movl %edx,4(_DEST)
20549 #endif
20550 xor %eax,%eax
20551 EXIT
20552diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20553index 1cad221..de671ee 100644
20554--- a/arch/x86/lib/rwlock.S
20555+++ b/arch/x86/lib/rwlock.S
20556@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20557 FRAME
20558 0: LOCK_PREFIX
20559 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20560+
20561+#ifdef CONFIG_PAX_REFCOUNT
20562+ jno 1234f
20563+ LOCK_PREFIX
20564+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20565+ int $4
20566+1234:
20567+ _ASM_EXTABLE(1234b, 1234b)
20568+#endif
20569+
20570 1: rep; nop
20571 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20572 jne 1b
20573 LOCK_PREFIX
20574 	WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20575+
20576+#ifdef CONFIG_PAX_REFCOUNT
20577+ jno 1234f
20578+ LOCK_PREFIX
20579+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20580+ int $4
20581+1234:
20582+ _ASM_EXTABLE(1234b, 1234b)
20583+#endif
20584+
20585 jnz 0b
20586 ENDFRAME
20587+ pax_force_retaddr
20588 ret
20589 CFI_ENDPROC
20590 END(__write_lock_failed)
20591@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20592 FRAME
20593 0: LOCK_PREFIX
20594 READ_LOCK_SIZE(inc) (%__lock_ptr)
20595+
20596+#ifdef CONFIG_PAX_REFCOUNT
20597+ jno 1234f
20598+ LOCK_PREFIX
20599+ READ_LOCK_SIZE(dec) (%__lock_ptr)
20600+ int $4
20601+1234:
20602+ _ASM_EXTABLE(1234b, 1234b)
20603+#endif
20604+
20605 1: rep; nop
20606 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20607 js 1b
20608 LOCK_PREFIX
20609 	READ_LOCK_SIZE(dec) (%__lock_ptr)
20610+
20611+#ifdef CONFIG_PAX_REFCOUNT
20612+ jno 1234f
20613+ LOCK_PREFIX
20614+ READ_LOCK_SIZE(inc) (%__lock_ptr)
20615+ int $4
20616+1234:
20617+ _ASM_EXTABLE(1234b, 1234b)
20618+#endif
20619+
20620 js 0b
20621 ENDFRAME
20622+ pax_force_retaddr
20623 ret
20624 CFI_ENDPROC
20625 END(__read_lock_failed)
20626diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20627index 5dff5f0..cadebf4 100644
20628--- a/arch/x86/lib/rwsem.S
20629+++ b/arch/x86/lib/rwsem.S
20630@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20631 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20632 CFI_RESTORE __ASM_REG(dx)
20633 	restore_common_regs
20634+	pax_force_retaddr
20635 ret
20636 CFI_ENDPROC
20637 ENDPROC(call_rwsem_down_read_failed)
20638@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20639 movq %rax,%rdi
20640 call rwsem_down_write_failed
20641 restore_common_regs
20642+	pax_force_retaddr
20643 ret
20644 CFI_ENDPROC
20645 ENDPROC(call_rwsem_down_write_failed)
20646@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20647 movq %rax,%rdi
20648 call rwsem_wake
20649 restore_common_regs
20650-1: ret
20651+1: pax_force_retaddr
20652+ ret
20653 CFI_ENDPROC
20654 ENDPROC(call_rwsem_wake)
20655
20656@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20657 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20658 CFI_RESTORE __ASM_REG(dx)
20659 	restore_common_regs
20660+	pax_force_retaddr
20661 ret
20662 CFI_ENDPROC
20663 ENDPROC(call_rwsem_downgrade_wake)
20664diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20665index a63efd6..ccecad8 100644
20666--- a/arch/x86/lib/thunk_64.S
20667+++ b/arch/x86/lib/thunk_64.S
20668@@ -8,6 +8,7 @@
20669 #include <linux/linkage.h>
20670 #include <asm/dwarf2.h>
20671 #include <asm/calling.h>
20672+#include <asm/alternative-asm.h>
20673
20674 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20675 .macro THUNK name, func, put_ret_addr_in_rdi=0
20676@@ -41,5 +42,6 @@
20677 SAVE_ARGS
20678 restore:
20679 RESTORE_ARGS
20680+ pax_force_retaddr
20681 ret
20682 	CFI_ENDPROC
20683diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20684index e218d5d..35679b4 100644
20685--- a/arch/x86/lib/usercopy_32.c
20686+++ b/arch/x86/lib/usercopy_32.c
20687@@ -43,7 +43,7 @@ do { \
20688 __asm__ __volatile__( \
20689 " testl %1,%1\n" \
20690 " jz 2f\n" \
20691-		"0:	lodsb\n"					\
20692+		"0:	"__copyuser_seg"lodsb\n"			\
20693 " stosb\n" \
20694 " testb %%al,%%al\n" \
20695 " jz 1f\n" \
20696@@ -128,10 +128,12 @@ do { \
20697 int __d0; \
20698 might_fault(); \
20699 __asm__ __volatile__( \
20700+ __COPYUSER_SET_ES \
20701 "0: rep; stosl\n" \
20702 " movl %2,%0\n" \
20703 "1: rep; stosb\n" \
20704 "2:\n" \
20705+ __COPYUSER_RESTORE_ES \
20706 ".section .fixup,\"ax\"\n" \
20707 "3: lea 0(%2,%0,4),%0\n" \
20708 " jmp 2b\n" \
20709@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20710 might_fault();
20711
20712 __asm__ __volatile__(
20713+		__COPYUSER_SET_ES
20714 " testl %0, %0\n"
20715 " jz 3f\n"
20716 		"	andl %0,%%ecx\n"
20717@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20718 " subl %%ecx,%0\n"
20719 " addl %0,%%eax\n"
20720 "1:\n"
20721+		__COPYUSER_RESTORE_ES
20722 ".section .fixup,\"ax\"\n"
20723 "2: xorl %%eax,%%eax\n"
20724 " jmp 1b\n"
20725@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20726
20727 #ifdef CONFIG_X86_INTEL_USERCOPY
20728 static unsigned long
20729-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20730+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20731 {
20732 int d0, d1;
20733 __asm__ __volatile__(
20734@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20735 " .align 2,0x90\n"
20736 "3: movl 0(%4), %%eax\n"
20737 "4: movl 4(%4), %%edx\n"
20738- "5: movl %%eax, 0(%3)\n"
20739- "6: movl %%edx, 4(%3)\n"
20740+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20741+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20742 "7: movl 8(%4), %%eax\n"
20743 "8: movl 12(%4),%%edx\n"
20744- "9: movl %%eax, 8(%3)\n"
20745- "10: movl %%edx, 12(%3)\n"
20746+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20747+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20748 "11: movl 16(%4), %%eax\n"
20749 "12: movl 20(%4), %%edx\n"
20750- "13: movl %%eax, 16(%3)\n"
20751- "14: movl %%edx, 20(%3)\n"
20752+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20753+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20754 "15: movl 24(%4), %%eax\n"
20755 "16: movl 28(%4), %%edx\n"
20756- "17: movl %%eax, 24(%3)\n"
20757- "18: movl %%edx, 28(%3)\n"
20758+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20759+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20760 "19: movl 32(%4), %%eax\n"
20761 "20: movl 36(%4), %%edx\n"
20762- "21: movl %%eax, 32(%3)\n"
20763- "22: movl %%edx, 36(%3)\n"
20764+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20765+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20766 "23: movl 40(%4), %%eax\n"
20767 "24: movl 44(%4), %%edx\n"
20768- "25: movl %%eax, 40(%3)\n"
20769- "26: movl %%edx, 44(%3)\n"
20770+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20771+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20772 "27: movl 48(%4), %%eax\n"
20773 "28: movl 52(%4), %%edx\n"
20774- "29: movl %%eax, 48(%3)\n"
20775- "30: movl %%edx, 52(%3)\n"
20776+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20777+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20778 "31: movl 56(%4), %%eax\n"
20779 "32: movl 60(%4), %%edx\n"
20780- "33: movl %%eax, 56(%3)\n"
20781- "34: movl %%edx, 60(%3)\n"
20782+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20783+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20784 " addl $-64, %0\n"
20785 " addl $64, %4\n"
20786 " addl $64, %3\n"
fe2de317 20787@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20788 " shrl $2, %0\n"
20789 " andl $3, %%eax\n"
20790 " cld\n"
20791+ __COPYUSER_SET_ES
20792 "99: rep; movsl\n"
20793 "36: movl %%eax, %0\n"
20794 "37: rep; movsb\n"
20795 "100:\n"
20796+ __COPYUSER_RESTORE_ES
20797+ ".section .fixup,\"ax\"\n"
20798+ "101: lea 0(%%eax,%0,4),%0\n"
20799+ " jmp 100b\n"
20800+ ".previous\n"
20801+ ".section __ex_table,\"a\"\n"
20802+ " .align 4\n"
20803+ " .long 1b,100b\n"
20804+ " .long 2b,100b\n"
20805+ " .long 3b,100b\n"
20806+ " .long 4b,100b\n"
20807+ " .long 5b,100b\n"
20808+ " .long 6b,100b\n"
20809+ " .long 7b,100b\n"
20810+ " .long 8b,100b\n"
20811+ " .long 9b,100b\n"
20812+ " .long 10b,100b\n"
20813+ " .long 11b,100b\n"
20814+ " .long 12b,100b\n"
20815+ " .long 13b,100b\n"
20816+ " .long 14b,100b\n"
20817+ " .long 15b,100b\n"
20818+ " .long 16b,100b\n"
20819+ " .long 17b,100b\n"
20820+ " .long 18b,100b\n"
20821+ " .long 19b,100b\n"
20822+ " .long 20b,100b\n"
20823+ " .long 21b,100b\n"
20824+ " .long 22b,100b\n"
20825+ " .long 23b,100b\n"
20826+ " .long 24b,100b\n"
20827+ " .long 25b,100b\n"
20828+ " .long 26b,100b\n"
20829+ " .long 27b,100b\n"
20830+ " .long 28b,100b\n"
20831+ " .long 29b,100b\n"
20832+ " .long 30b,100b\n"
20833+ " .long 31b,100b\n"
20834+ " .long 32b,100b\n"
20835+ " .long 33b,100b\n"
20836+ " .long 34b,100b\n"
20837+ " .long 35b,100b\n"
20838+ " .long 36b,100b\n"
20839+ " .long 37b,100b\n"
20840+ " .long 99b,101b\n"
20841+ ".previous"
20842+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20843+	: "1"(to), "2"(from), "0"(size)
20844+ : "eax", "edx", "memory");
20845+ return size;
20846+}
20847+
20848+static unsigned long
20849+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20850+{
20851+ int d0, d1;
20852+ __asm__ __volatile__(
20853+ " .align 2,0x90\n"
20854+	"1:	"__copyuser_seg" movl 32(%4), %%eax\n"
20855+ " cmpl $67, %0\n"
20856+ " jbe 3f\n"
16454cff 20857+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20858+	"	.align 2,0x90\n"
20859+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20860+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20861+ "5: movl %%eax, 0(%3)\n"
20862+ "6: movl %%edx, 4(%3)\n"
20863+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20864+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20865+ "9: movl %%eax, 8(%3)\n"
20866+ "10: movl %%edx, 12(%3)\n"
20867+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20868+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20869+ "13: movl %%eax, 16(%3)\n"
20870+ "14: movl %%edx, 20(%3)\n"
20871+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20872+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20873+ "17: movl %%eax, 24(%3)\n"
20874+ "18: movl %%edx, 28(%3)\n"
20875+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20876+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20877+ "21: movl %%eax, 32(%3)\n"
20878+ "22: movl %%edx, 36(%3)\n"
20879+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20880+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20881+ "25: movl %%eax, 40(%3)\n"
20882+ "26: movl %%edx, 44(%3)\n"
20883+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20884+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20885+ "29: movl %%eax, 48(%3)\n"
20886+ "30: movl %%edx, 52(%3)\n"
20887+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20888+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20889+ "33: movl %%eax, 56(%3)\n"
20890+ "34: movl %%edx, 60(%3)\n"
20891+ " addl $-64, %0\n"
20892+ " addl $64, %4\n"
20893+ " addl $64, %3\n"
20894+ " cmpl $63, %0\n"
20895+ " ja 1b\n"
20896+ "35: movl %0, %%eax\n"
20897+ " shrl $2, %0\n"
20898+ " andl $3, %%eax\n"
20899+ " cld\n"
20900+	"99:	rep; "__copyuser_seg" movsl\n"
20901+	"36:	movl %%eax, %0\n"
20902+	"37:	rep; "__copyuser_seg" movsb\n"
20903+	"100:\n"
20904 ".section .fixup,\"ax\"\n"
20905 "101: lea 0(%%eax,%0,4),%0\n"
20906 " jmp 100b\n"
fe2de317 20907@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20908 int d0, d1;
20909 __asm__ __volatile__(
20910 		"	.align 2,0x90\n"
20911-		"0:	movl 32(%4), %%eax\n"
20912+		"0:	"__copyuser_seg" movl 32(%4), %%eax\n"
20913 		"	cmpl $67, %0\n"
20914 " jbe 2f\n"
20915- "1: movl 64(%4), %%eax\n"
20916+		"1:	"__copyuser_seg" movl 64(%4), %%eax\n"
20917 		"	.align 2,0x90\n"
20918- "2: movl 0(%4), %%eax\n"
20919- "21: movl 4(%4), %%edx\n"
20920+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20921+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20922 " movl %%eax, 0(%3)\n"
20923 " movl %%edx, 4(%3)\n"
20924- "3: movl 8(%4), %%eax\n"
20925- "31: movl 12(%4),%%edx\n"
20926+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20927+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20928 " movl %%eax, 8(%3)\n"
20929 " movl %%edx, 12(%3)\n"
20930- "4: movl 16(%4), %%eax\n"
20931- "41: movl 20(%4), %%edx\n"
20932+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20933+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20934 " movl %%eax, 16(%3)\n"
20935 " movl %%edx, 20(%3)\n"
20936- "10: movl 24(%4), %%eax\n"
20937- "51: movl 28(%4), %%edx\n"
20938+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20939+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20940 " movl %%eax, 24(%3)\n"
20941 " movl %%edx, 28(%3)\n"
20942- "11: movl 32(%4), %%eax\n"
20943- "61: movl 36(%4), %%edx\n"
20944+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20945+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20946 " movl %%eax, 32(%3)\n"
20947 " movl %%edx, 36(%3)\n"
20948- "12: movl 40(%4), %%eax\n"
20949- "71: movl 44(%4), %%edx\n"
20950+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20951+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20952 " movl %%eax, 40(%3)\n"
20953 " movl %%edx, 44(%3)\n"
20954- "13: movl 48(%4), %%eax\n"
20955- "81: movl 52(%4), %%edx\n"
20956+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20957+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20958 " movl %%eax, 48(%3)\n"
20959 " movl %%edx, 52(%3)\n"
20960- "14: movl 56(%4), %%eax\n"
20961- "91: movl 60(%4), %%edx\n"
16454cff
MT
20962+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20963+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
20964 " movl %%eax, 56(%3)\n"
20965 " movl %%edx, 60(%3)\n"
20966 		"	addl $-64, %0\n"
20967@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20968 " shrl $2, %0\n"
20969 " andl $3, %%eax\n"
20970 " cld\n"
20971- "6: rep; movsl\n"
20972+		"6:	rep; "__copyuser_seg" movsl\n"
20973 		"	movl %%eax,%0\n"
20974-		"7:	rep; movsb\n"
20975+		"7:	rep; "__copyuser_seg" movsb\n"
20976 		"8:\n"
20977 ".section .fixup,\"ax\"\n"
20978 "9: lea 0(%%eax,%0,4),%0\n"
20979@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
20980
20981 __asm__ __volatile__(
20982 		"	.align 2,0x90\n"
20983-		"0:	movl 32(%4), %%eax\n"
20984+		"0:	"__copyuser_seg" movl 32(%4), %%eax\n"
20985 		"	cmpl $67, %0\n"
20986 " jbe 2f\n"
20987- "1: movl 64(%4), %%eax\n"
20988+		"1:	"__copyuser_seg" movl 64(%4), %%eax\n"
20989 		"	.align 2,0x90\n"
20990- "2: movl 0(%4), %%eax\n"
20991- "21: movl 4(%4), %%edx\n"
20992+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20993+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20994 " movnti %%eax, 0(%3)\n"
20995 " movnti %%edx, 4(%3)\n"
20996- "3: movl 8(%4), %%eax\n"
20997- "31: movl 12(%4),%%edx\n"
20998+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20999+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21000 " movnti %%eax, 8(%3)\n"
21001 " movnti %%edx, 12(%3)\n"
21002- "4: movl 16(%4), %%eax\n"
21003- "41: movl 20(%4), %%edx\n"
21004+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21005+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21006 " movnti %%eax, 16(%3)\n"
21007 " movnti %%edx, 20(%3)\n"
21008- "10: movl 24(%4), %%eax\n"
21009- "51: movl 28(%4), %%edx\n"
21010+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21011+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21012 " movnti %%eax, 24(%3)\n"
21013 " movnti %%edx, 28(%3)\n"
21014- "11: movl 32(%4), %%eax\n"
21015- "61: movl 36(%4), %%edx\n"
21016+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21017+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21018 " movnti %%eax, 32(%3)\n"
21019 " movnti %%edx, 36(%3)\n"
21020- "12: movl 40(%4), %%eax\n"
21021- "71: movl 44(%4), %%edx\n"
21022+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21023+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21024 " movnti %%eax, 40(%3)\n"
21025 " movnti %%edx, 44(%3)\n"
21026- "13: movl 48(%4), %%eax\n"
21027- "81: movl 52(%4), %%edx\n"
21028+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21029+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21030 " movnti %%eax, 48(%3)\n"
21031 " movnti %%edx, 52(%3)\n"
21032- "14: movl 56(%4), %%eax\n"
21033- "91: movl 60(%4), %%edx\n"
16454cff
MT
21034+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21035+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
21036 " movnti %%eax, 56(%3)\n"
21037 " movnti %%edx, 60(%3)\n"
21038 		"	addl $-64, %0\n"
21039@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21040 " shrl $2, %0\n"
21041 " andl $3, %%eax\n"
21042 " cld\n"
21043- "6: rep; movsl\n"
21044+		"6:	rep; "__copyuser_seg" movsl\n"
21045 		"	movl %%eax,%0\n"
21046-		"7:	rep; movsb\n"
21047+		"7:	rep; "__copyuser_seg" movsb\n"
21048 		"8:\n"
21049 ".section .fixup,\"ax\"\n"
21050 "9: lea 0(%%eax,%0,4),%0\n"
21051@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21052
21053 __asm__ __volatile__(
21054 		"	.align 2,0x90\n"
21055-		"0:	movl 32(%4), %%eax\n"
21056+		"0:	"__copyuser_seg" movl 32(%4), %%eax\n"
21057 		"	cmpl $67, %0\n"
21058 " jbe 2f\n"
21059- "1: movl 64(%4), %%eax\n"
21060+		"1:	"__copyuser_seg" movl 64(%4), %%eax\n"
21061 		"	.align 2,0x90\n"
21062- "2: movl 0(%4), %%eax\n"
21063- "21: movl 4(%4), %%edx\n"
21064+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21065+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21066 " movnti %%eax, 0(%3)\n"
21067 " movnti %%edx, 4(%3)\n"
21068- "3: movl 8(%4), %%eax\n"
21069- "31: movl 12(%4),%%edx\n"
21070+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21071+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21072 " movnti %%eax, 8(%3)\n"
21073 " movnti %%edx, 12(%3)\n"
21074- "4: movl 16(%4), %%eax\n"
21075- "41: movl 20(%4), %%edx\n"
21076+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21077+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21078 " movnti %%eax, 16(%3)\n"
21079 " movnti %%edx, 20(%3)\n"
21080- "10: movl 24(%4), %%eax\n"
21081- "51: movl 28(%4), %%edx\n"
21082+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21083+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21084 " movnti %%eax, 24(%3)\n"
21085 " movnti %%edx, 28(%3)\n"
21086- "11: movl 32(%4), %%eax\n"
21087- "61: movl 36(%4), %%edx\n"
21088+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21089+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21090 " movnti %%eax, 32(%3)\n"
21091 " movnti %%edx, 36(%3)\n"
21092- "12: movl 40(%4), %%eax\n"
21093- "71: movl 44(%4), %%edx\n"
21094+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21095+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21096 " movnti %%eax, 40(%3)\n"
21097 " movnti %%edx, 44(%3)\n"
21098- "13: movl 48(%4), %%eax\n"
21099- "81: movl 52(%4), %%edx\n"
21100+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21101+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21102 " movnti %%eax, 48(%3)\n"
21103 " movnti %%edx, 52(%3)\n"
21104- "14: movl 56(%4), %%eax\n"
21105- "91: movl 60(%4), %%edx\n"
16454cff
MT
21106+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21107+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
21108 " movnti %%eax, 56(%3)\n"
21109 " movnti %%edx, 60(%3)\n"
21110 		"	addl $-64, %0\n"
21111@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21112 " shrl $2, %0\n"
21113 " andl $3, %%eax\n"
21114 " cld\n"
21115- "6: rep; movsl\n"
21116+		"6:	rep; "__copyuser_seg" movsl\n"
21117 		"	movl %%eax,%0\n"
21118-		"7:	rep; movsb\n"
21119+		"7:	rep; "__copyuser_seg" movsb\n"
21120 		"8:\n"
21121 ".section .fixup,\"ax\"\n"
21122 "9: lea 0(%%eax,%0,4),%0\n"
21123@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21124 */
21125 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21126 unsigned long size);
21127-unsigned long __copy_user_intel(void __user *to, const void *from,
21128+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21129+ unsigned long size);
21130+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21131 unsigned long size);
21132 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21133 const void __user *from, unsigned long size);
21134 #endif /* CONFIG_X86_INTEL_USERCOPY */
21135
21136 /* Generic arbitrary sized copy. */
21137-#define __copy_user(to, from, size) \
21138+#define __copy_user(to, from, size, prefix, set, restore) \
21139 do { \
21140 int __d0, __d1, __d2; \
21141 __asm__ __volatile__( \
21142+ set \
21143 " cmp $7,%0\n" \
21144 " jbe 1f\n" \
21145 " movl %1,%0\n" \
21146 " negl %0\n" \
21147 " andl $7,%0\n" \
21148 " subl %0,%3\n" \
21149-		"4:	rep; movsb\n"					\
21150+		"4:	rep; "prefix"movsb\n"				\
21151 " movl %3,%0\n" \
21152 " shrl $2,%0\n" \
21153 " andl $3,%3\n" \
21154 " .align 2,0x90\n" \
21155-		"0:	rep; movsl\n"					\
21156+		"0:	rep; "prefix"movsl\n"				\
21157 		"	movl %3,%0\n"					\
21158-		"1:	rep; movsb\n"					\
21159+		"1:	rep; "prefix"movsb\n"				\
21160 "2:\n" \
21161+ restore \
21162 ".section .fixup,\"ax\"\n" \
21163 "5: addl %3,%0\n" \
21164 " jmp 2b\n" \
21165@@ -682,14 +799,14 @@ do { \
21166 " negl %0\n" \
21167 " andl $7,%0\n" \
21168 " subl %0,%3\n" \
58c5fc13 21169- "4: rep; movsb\n" \
16454cff 21170+ "4: rep; "__copyuser_seg"movsb\n" \
bc901d79
MT
21171 " movl %3,%0\n" \
21172 " shrl $2,%0\n" \
21173 " andl $3,%3\n" \
21174 " .align 2,0x90\n" \
58c5fc13 21175- "0: rep; movsl\n" \
16454cff 21176+ "0: rep; "__copyuser_seg"movsl\n" \
bc901d79 21177 " movl %3,%0\n" \
58c5fc13 21178- "1: rep; movsb\n" \
16454cff 21179+ "1: rep; "__copyuser_seg"movsb\n" \
bc901d79
MT
21180 "2:\n" \
21181 ".section .fixup,\"ax\"\n" \
21182 "5: addl %3,%0\n" \
21183@@ -775,9 +892,9 @@ survive:
58c5fc13
MT
21184 }
21185 #endif
21186 if (movsl_is_ok(to, from, n))
21187- __copy_user(to, from, n);
bc901d79 21188+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
58c5fc13
MT
21189 else
21190- n = __copy_user_intel(to, from, n);
21191+ n = __generic_copy_to_user_intel(to, from, n);
21192 return n;
21193 }
21194 EXPORT_SYMBOL(__copy_to_user_ll);
fe2de317 21195@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
58c5fc13
MT
21196 unsigned long n)
21197 {
21198 if (movsl_is_ok(to, from, n))
21199- __copy_user(to, from, n);
bc901d79 21200+ __copy_user(to, from, n, __copyuser_seg, "", "");
58c5fc13
MT
21201 else
21202- n = __copy_user_intel((void __user *)to,
21203- (const void *)from, n);
21204+ n = __generic_copy_from_user_intel(to, from, n);
21205 return n;
21206 }
21207 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
fe2de317 21208@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
58c5fc13
MT
21209 if (n > 64 && cpu_has_xmm2)
21210 n = __copy_user_intel_nocache(to, from, n);
21211 else
21212- __copy_user(to, from, n);
bc901d79 21213+ __copy_user(to, from, n, __copyuser_seg, "", "");
58c5fc13
MT
21214 #else
21215- __copy_user(to, from, n);
bc901d79 21216+ __copy_user(to, from, n, __copyuser_seg, "", "");
58c5fc13
MT
21217 #endif
21218 return n;
21219 }
21220 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21221
21222-/**
21223- * copy_to_user: - Copy a block of data into user space.
21224- * @to: Destination address, in user space.
21225- * @from: Source address, in kernel space.
21226- * @n: Number of bytes to copy.
21227- *
21228- * Context: User context only. This function may sleep.
21229- *
21230- * Copy data from kernel space to user space.
21231- *
21232- * Returns number of bytes that could not be copied.
21233- * On success, this will be zero.
21234- */
21235-unsigned long
21236-copy_to_user(void __user *to, const void *from, unsigned long n)
fe2de317 21237-{
58c5fc13
MT
21238- if (access_ok(VERIFY_WRITE, to, n))
21239- n = __copy_to_user(to, from, n);
21240- return n;
fe2de317 21241-}
58c5fc13 21242-EXPORT_SYMBOL(copy_to_user);
fe2de317 21243-
58c5fc13
MT
21244-/**
21245- * copy_from_user: - Copy a block of data from user space.
21246- * @to: Destination address, in kernel space.
21247- * @from: Source address, in user space.
21248- * @n: Number of bytes to copy.
21249- *
21250- * Context: User context only. This function may sleep.
21251- *
21252- * Copy data from user space to kernel space.
21253- *
21254- * Returns number of bytes that could not be copied.
21255- * On success, this will be zero.
21256- *
21257- * If some data could not be copied, this function will pad the copied
21258- * data to the requested size using zero bytes.
21259- */
21260-unsigned long
ae4e228f 21261-_copy_from_user(void *to, const void __user *from, unsigned long n)
fe2de317 21262-{
58c5fc13
MT
21263- if (access_ok(VERIFY_READ, from, n))
21264- n = __copy_from_user(to, from, n);
21265- else
21266- memset(to, 0, n);
21267- return n;
fe2de317 21268-}
ae4e228f 21269-EXPORT_SYMBOL(_copy_from_user);
fe2de317
MT
21270-
21271 void copy_from_user_overflow(void)
21272 {
21273 WARN(1, "Buffer overflow detected!\n");
21274 }
21275 EXPORT_SYMBOL(copy_from_user_overflow);
21276+
21277+void copy_to_user_overflow(void)
21278+{
21279+ WARN(1, "Buffer overflow detected!\n");
21280+}
ae4e228f 21281+EXPORT_SYMBOL(copy_to_user_overflow);
fe2de317 21282+
ae4e228f 21283+#ifdef CONFIG_PAX_MEMORY_UDEREF
bc901d79 21284+void __set_fs(mm_segment_t x)
fe2de317 21285+{
bc901d79
MT
21286+ switch (x.seg) {
21287+ case 0:
21288+ loadsegment(gs, 0);
21289+ break;
21290+ case TASK_SIZE_MAX:
21291+ loadsegment(gs, __USER_DS);
21292+ break;
21293+ case -1UL:
21294+ loadsegment(gs, __KERNEL_DS);
21295+ break;
21296+ default:
21297+ BUG();
21298+ }
21299+ return;
fe2de317 21300+}
71d190be 21301+EXPORT_SYMBOL(__set_fs);
ae4e228f
MT
21302+
21303+void set_fs(mm_segment_t x)
21304+{
58c5fc13 21305+ current_thread_info()->addr_limit = x;
bc901d79 21306+ __set_fs(x);
58c5fc13 21307+}
58c5fc13 21308+EXPORT_SYMBOL(set_fs);
bc901d79 21309+#endif
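
The usercopy_32.c changes above follow a single pattern: every instruction that touches userland memory gains an optional segment-override prefix ("__copyuser_seg"), the generic __copy_user() macro is parameterized with prefix/set/restore arguments so one template serves both copy directions, and set_fs() now reloads %gs to match the new address limit under UDEREF. The stand-alone sketch below only illustrates the string-literal pasting this parameterization relies on; the "gs; " prefix is a stand-in, since the real __copyuser_seg definition lives elsewhere in the patch.

    #include <stdio.h>

    /* Adjacent string literals are concatenated by the compiler, so a template
     * such as "0: rep; " prefix "movsl\n" collapses into a single asm line
     * whether the prefix is a segment override or an empty string. */
    #define COPY_LINE(prefix) "0:     rep; " prefix "movsl\n"

    int main(void)
    {
            fputs(COPY_LINE("gs; "), stdout);  /* user access with a segment override */
            fputs(COPY_LINE(""), stdout);      /* plain kernel-to-kernel copy */
            return 0;
    }
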
fe2de317
MT
21310diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21311index b7c2849..8633ad8 100644
21312--- a/arch/x86/lib/usercopy_64.c
21313+++ b/arch/x86/lib/usercopy_64.c
8308f9c9 21314@@ -42,6 +42,12 @@ long
df50ba0c
MT
21315 __strncpy_from_user(char *dst, const char __user *src, long count)
21316 {
21317 long res;
8308f9c9
MT
21318+
21319+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
21320+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21321+ src += PAX_USER_SHADOW_BASE;
8308f9c9
MT
21322+#endif
21323+
df50ba0c
MT
21324 __do_strncpy_from_user(dst, src, count, res);
21325 return res;
21326 }
fe2de317 21327@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
df50ba0c
MT
21328 {
21329 long __d0;
21330 might_fault();
8308f9c9
MT
21331+
21332+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
21333+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21334+ addr += PAX_USER_SHADOW_BASE;
8308f9c9
MT
21335+#endif
21336+
df50ba0c
MT
21337 /* no memory constraint because it doesn't change any memory gcc knows
21338 about */
21339 asm volatile(
fe2de317
MT
21340@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21341 }
21342 EXPORT_SYMBOL(strlen_user);
df50ba0c 21343
fe2de317
MT
21344-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21345+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
df50ba0c
MT
21346 {
21347- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
6e9df6a3
MT
21348- return copy_user_generic((__force void *)to, (__force void *)from, len);
21349- }
21350- return len;
df50ba0c 21351+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
8308f9c9
MT
21352+
21353+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
21354+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21355+ to += PAX_USER_SHADOW_BASE;
21356+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21357+ from += PAX_USER_SHADOW_BASE;
8308f9c9
MT
21358+#endif
21359+
6e9df6a3 21360+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
df50ba0c
MT
21361+ }
21362+ return len;
21363 }
21364 EXPORT_SYMBOL(copy_in_user);
21365
6e9df6a3
MT
21366@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21367 * it is not necessary to optimize tail handling.
21368 */
21369 unsigned long
21370-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
fe2de317 21371+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
6e9df6a3
MT
21372 {
21373 char c;
21374 unsigned zero_len;
fe2de317
MT
21375diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21376index d0474ad..36e9257 100644
21377--- a/arch/x86/mm/extable.c
21378+++ b/arch/x86/mm/extable.c
21379@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
58c5fc13
MT
21380 const struct exception_table_entry *fixup;
21381
21382 #ifdef CONFIG_PNPBIOS
21383- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21384+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21385 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21386 extern u32 pnp_bios_is_utter_crap;
21387 pnp_bios_is_utter_crap = 1;
fe2de317
MT
21388diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21389index 0d17c8c..4f4764f 100644
21390--- a/arch/x86/mm/fault.c
21391+++ b/arch/x86/mm/fault.c
6e9df6a3 21392@@ -13,11 +13,18 @@
ae4e228f 21393 #include <linux/perf_event.h> /* perf_sw_event */
bc901d79 21394 #include <linux/hugetlb.h> /* hstate_index_to_shift */
15a11c5b 21395 #include <linux/prefetch.h> /* prefetchw */
58c5fc13
MT
21396+#include <linux/unistd.h>
21397+#include <linux/compiler.h>
21398
21399 #include <asm/traps.h> /* dotraplinkage, ... */
21400 #include <asm/pgalloc.h> /* pgd_*(), ... */
21401 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
6e9df6a3 21402 #include <asm/vsyscall.h>
58c5fc13 21403+#include <asm/tlbflush.h>
df50ba0c
MT
21404+
21405+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21406+#include <asm/stacktrace.h>
df50ba0c 21407+#endif
58c5fc13
MT
21408
21409 /*
21410 * Page fault error code bits:
fe2de317 21411@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
58c5fc13
MT
21412 int ret = 0;
21413
21414 /* kprobe_running() needs smp_processor_id() */
21415- if (kprobes_built_in() && !user_mode_vm(regs)) {
21416+ if (kprobes_built_in() && !user_mode(regs)) {
21417 preempt_disable();
21418 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21419 ret = 1;
fe2de317 21420@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
bc901d79
MT
21421 return !instr_lo || (instr_lo>>1) == 1;
21422 case 0x00:
21423 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21424- if (probe_kernel_address(instr, opcode))
21425+ if (user_mode(regs)) {
6e9df6a3 21426+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
bc901d79
MT
21427+ return 0;
21428+ } else if (probe_kernel_address(instr, opcode))
21429 return 0;
21430
21431 *prefetch = (instr_lo == 0xF) &&
fe2de317 21432@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
bc901d79
MT
21433 while (instr < max_instr) {
21434 unsigned char opcode;
21435
21436- if (probe_kernel_address(instr, opcode))
21437+ if (user_mode(regs)) {
6e9df6a3 21438+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
bc901d79
MT
21439+ break;
21440+ } else if (probe_kernel_address(instr, opcode))
21441 break;
21442
21443 instr++;
fe2de317 21444@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
58c5fc13
MT
21445 force_sig_info(si_signo, &info, tsk);
21446 }
21447
6e9df6a3
MT
21448+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21449+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21450+#endif
21451+
58c5fc13
MT
21452+#ifdef CONFIG_PAX_EMUTRAMP
21453+static int pax_handle_fetch_fault(struct pt_regs *regs);
21454+#endif
21455+
21456+#ifdef CONFIG_PAX_PAGEEXEC
21457+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21458+{
21459+ pgd_t *pgd;
21460+ pud_t *pud;
21461+ pmd_t *pmd;
21462+
21463+ pgd = pgd_offset(mm, address);
21464+ if (!pgd_present(*pgd))
21465+ return NULL;
21466+ pud = pud_offset(pgd, address);
21467+ if (!pud_present(*pud))
21468+ return NULL;
21469+ pmd = pmd_offset(pud, address);
21470+ if (!pmd_present(*pmd))
21471+ return NULL;
21472+ return pmd;
21473+}
21474+#endif
21475+
21476 DEFINE_SPINLOCK(pgd_lock);
21477 LIST_HEAD(pgd_list);
21478
6e9df6a3 21479@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
16454cff
MT
21480 for (address = VMALLOC_START & PMD_MASK;
21481 address >= TASK_SIZE && address < FIXADDR_TOP;
df50ba0c 21482 address += PMD_SIZE) {
df50ba0c
MT
21483+
21484+#ifdef CONFIG_PAX_PER_CPU_PGD
21485+ unsigned long cpu;
21486+#else
21487 struct page *page;
21488+#endif
21489
16454cff 21490 spin_lock(&pgd_lock);
df50ba0c
MT
21491+
21492+#ifdef CONFIG_PAX_PER_CPU_PGD
21493+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21494+ pgd_t *pgd = get_cpu_pgd(cpu);
bc901d79 21495+ pmd_t *ret;
df50ba0c
MT
21496+#else
21497 list_for_each_entry(page, &pgd_list, lru) {
df50ba0c 21498+ pgd_t *pgd = page_address(page);
bc901d79
MT
21499 spinlock_t *pgt_lock;
21500 pmd_t *ret;
21501
6e9df6a3 21502@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
bc901d79
MT
21503 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21504
21505 spin_lock(pgt_lock);
21506- ret = vmalloc_sync_one(page_address(page), address);
df50ba0c
MT
21507+#endif
21508+
bc901d79
MT
21509+ ret = vmalloc_sync_one(pgd, address);
21510+
21511+#ifndef CONFIG_PAX_PER_CPU_PGD
21512 spin_unlock(pgt_lock);
21513+#endif
21514
21515 if (!ret)
df50ba0c 21516 break;
fe2de317 21517@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
df50ba0c
MT
21518 * an interrupt in the middle of a task switch..
21519 */
21520 pgd_paddr = read_cr3();
21521+
21522+#ifdef CONFIG_PAX_PER_CPU_PGD
21523+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21524+#endif
21525+
21526 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21527 if (!pmd_k)
21528 return -1;
fe2de317 21529@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
df50ba0c
MT
21530 * happen within a race in page table update. In the later
21531 * case just flush:
21532 */
21533+
21534+#ifdef CONFIG_PAX_PER_CPU_PGD
21535+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21536+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21537+#else
21538 pgd = pgd_offset(current->active_mm, address);
21539+#endif
21540+
21541 pgd_ref = pgd_offset_k(address);
21542 if (pgd_none(*pgd_ref))
21543 return -1;
fe2de317 21544@@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
58c5fc13
MT
21545 static int is_errata100(struct pt_regs *regs, unsigned long address)
21546 {
21547 #ifdef CONFIG_X86_64
21548- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21549+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21550 return 1;
21551 #endif
21552 return 0;
fe2de317 21553@@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
58c5fc13
MT
21554 }
21555
21556 static const char nx_warning[] = KERN_CRIT
21557-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21558+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21559
21560 static void
21561 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
fe2de317 21562@@ -570,15 +640,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
58c5fc13
MT
21563 if (!oops_may_print())
21564 return;
21565
21566- if (error_code & PF_INSTR) {
ae4e228f 21567+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
58c5fc13
MT
21568 unsigned int level;
21569
21570 pte_t *pte = lookup_address(address, &level);
21571
21572 if (pte && pte_present(*pte) && !pte_exec(*pte))
21573- printk(nx_warning, current_uid());
21574+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
fe2de317
MT
21575 }
21576
58c5fc13 21577+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f 21578+ if (init_mm.start_code <= address && address < init_mm.end_code) {
58c5fc13 21579+ if (current->signal->curr_ip)
ae4e228f
MT
21580+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21581+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
58c5fc13
MT
21582+ else
21583+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21584+ current->comm, task_pid_nr(current), current_uid(), current_euid());
fe2de317 21585+ }
58c5fc13 21586+#endif
fe2de317 21587+
58c5fc13
MT
21588 printk(KERN_ALERT "BUG: unable to handle kernel ");
21589 if (address < PAGE_SIZE)
fe2de317
MT
21590 printk(KERN_CONT "NULL pointer dereference");
21591@@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
6e9df6a3
MT
21592 }
21593 #endif
21594
58c5fc13 21595+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
6e9df6a3 21596+ if (pax_is_fetch_fault(regs, error_code, address)) {
58c5fc13
MT
21597+
21598+#ifdef CONFIG_PAX_EMUTRAMP
21599+ switch (pax_handle_fetch_fault(regs)) {
21600+ case 2:
21601+ return;
21602+ }
21603+#endif
21604+
6e9df6a3 21605+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
58c5fc13
MT
21606+ do_group_exit(SIGKILL);
21607+ }
21608+#endif
21609+
6e9df6a3
MT
21610 if (unlikely(show_unhandled_signals))
21611 show_signal_msg(regs, error_code, address, tsk);
58c5fc13 21612
fe2de317 21613@@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
6e9df6a3
MT
21614 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21615 printk(KERN_ERR
21616 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21617- tsk->comm, tsk->pid, address);
21618+ tsk->comm, task_pid_nr(tsk), address);
21619 code = BUS_MCEERR_AR;
21620 }
21621 #endif
fe2de317 21622@@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
58c5fc13
MT
21623 return 1;
21624 }
21625
21626+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21627+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21628+{
21629+ pte_t *pte;
21630+ pmd_t *pmd;
21631+ spinlock_t *ptl;
21632+ unsigned char pte_mask;
21633+
ae4e228f 21634+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
58c5fc13
MT
21635+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21636+ return 0;
21637+
21638+ /* PaX: it's our fault, let's handle it if we can */
21639+
21640+ /* PaX: take a look at read faults before acquiring any locks */
21641+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21642+ /* instruction fetch attempt from a protected page in user mode */
21643+ up_read(&mm->mmap_sem);
21644+
21645+#ifdef CONFIG_PAX_EMUTRAMP
21646+ switch (pax_handle_fetch_fault(regs)) {
21647+ case 2:
21648+ return 1;
21649+ }
21650+#endif
21651+
21652+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21653+ do_group_exit(SIGKILL);
21654+ }
21655+
21656+ pmd = pax_get_pmd(mm, address);
21657+ if (unlikely(!pmd))
21658+ return 0;
21659+
21660+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21661+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21662+ pte_unmap_unlock(pte, ptl);
21663+ return 0;
21664+ }
21665+
21666+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21667+ /* write attempt to a protected page in user mode */
21668+ pte_unmap_unlock(pte, ptl);
21669+ return 0;
21670+ }
21671+
21672+#ifdef CONFIG_SMP
21673+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21674+#else
21675+ if (likely(address > get_limit(regs->cs)))
21676+#endif
21677+ {
21678+ set_pte(pte, pte_mkread(*pte));
21679+ __flush_tlb_one(address);
21680+ pte_unmap_unlock(pte, ptl);
21681+ up_read(&mm->mmap_sem);
21682+ return 1;
21683+ }
21684+
21685+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21686+
21687+ /*
21688+ * PaX: fill DTLB with user rights and retry
21689+ */
21690+ __asm__ __volatile__ (
58c5fc13
MT
21691+ "orb %2,(%1)\n"
21692+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21693+/*
21694+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
21695+ * (and AMD's) TLBs. Namely, they do not cache PTEs that would raise *any*
21696+ * page fault when examined during a TLB load attempt. This is true not only
21697+ * for PTEs holding a non-present entry but also for present entries that will
21698+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21699+ * mechanism). In effect it means that we do *not* need to flush the TLBs
21700+ * for our target pages since their PTEs are simply not in the TLBs at all.
21701+ *
21702+ * The best thing about omitting it is that we gain around 15-20% speed in the
21703+ * fast path of the page fault handler and can get rid of tracing since we
21704+ * can no longer flush unintended entries.
21705+ */
21706+ "invlpg (%0)\n"
21707+#endif
16454cff 21708+ __copyuser_seg"testb $0,(%0)\n"
58c5fc13 21709+ "xorb %3,(%1)\n"
58c5fc13 21710+ :
bc901d79 21711+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
58c5fc13
MT
21712+ : "memory", "cc");
21713+ pte_unmap_unlock(pte, ptl);
21714+ up_read(&mm->mmap_sem);
21715+ return 1;
21716+}
21717+#endif
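
    /*
     * Worked sequence for the inline asm in pax_handle_pageexec_fault() above:
     * "orb" transiently sets _PAGE_USER (plus accessed/dirty) in the PTE, the
     * segment-prefixed "testb $0" then reads the faulting address through the
     * user segment so the CPU walks the page table and caches a user-accessible
     * translation in the DTLB, and "xorb" clears _PAGE_USER again (accessed and
     * dirty stay set).  The DTLB keeps the user-rights entry, so the retried
     * access succeeds even though the PTE itself is never left user-accessible.
     */
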
21718+
21719 /*
21720 * Handle a spurious fault caused by a stale TLB entry.
21721 *
6e9df6a3 21722@@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
58c5fc13 21723 static inline int
bc901d79 21724 access_error(unsigned long error_code, struct vm_area_struct *vma)
58c5fc13 21725 {
ae4e228f 21726+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
58c5fc13
MT
21727+ return 1;
21728+
bc901d79 21729 if (error_code & PF_WRITE) {
58c5fc13
MT
21730 /* write, present and write, not present: */
21731 if (unlikely(!(vma->vm_flags & VM_WRITE)))
fe2de317 21732@@ -989,18 +1181,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
58c5fc13
MT
21733 {
21734 struct vm_area_struct *vma;
21735 struct task_struct *tsk;
21736- unsigned long address;
21737 struct mm_struct *mm;
58c5fc13 21738 int fault;
bc901d79 21739 int write = error_code & PF_WRITE;
15a11c5b 21740 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
bc901d79 21741 (write ? FAULT_FLAG_WRITE : 0);
58c5fc13 21742
fe2de317
MT
21743- tsk = current;
21744- mm = tsk->mm;
21745-
21746 /* Get the faulting address: */
21747- address = read_cr2();
df50ba0c
MT
21748+ unsigned long address = read_cr2();
21749+
21750+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21751+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21752+ if (!search_exception_tables(regs->ip)) {
21753+ bad_area_nosemaphore(regs, error_code, address);
21754+ return;
21755+ }
21756+ if (address < PAX_USER_SHADOW_BASE) {
21757+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
6e9df6a3 21758+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
66a7e928 21759+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
df50ba0c
MT
21760+ } else
21761+ address -= PAX_USER_SHADOW_BASE;
21762+ }
21763+#endif
58c5fc13 21764+
fe2de317
MT
21765+ tsk = current;
21766+ mm = tsk->mm;
58c5fc13 21767
58c5fc13
MT
21768 /*
21769 * Detect and handle instructions that would cause a page fault for
fe2de317 21770@@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
58c5fc13
MT
21771 * User-mode registers count as a user access even for any
21772 * potential system fault or CPU buglet:
21773 */
21774- if (user_mode_vm(regs)) {
21775+ if (user_mode(regs)) {
21776 local_irq_enable();
21777 error_code |= PF_USER;
21778 } else {
6e9df6a3 21779@@ -1116,6 +1322,11 @@ retry:
58c5fc13
MT
21780 might_sleep();
21781 }
21782
21783+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21784+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21785+ return;
21786+#endif
21787+
21788 vma = find_vma(mm, address);
21789 if (unlikely(!vma)) {
21790 bad_area(regs, error_code, address);
6e9df6a3 21791@@ -1127,18 +1338,24 @@ retry:
58c5fc13
MT
21792 bad_area(regs, error_code, address);
21793 return;
21794 }
21795- if (error_code & PF_USER) {
21796- /*
21797- * Accessing the stack below %sp is always a bug.
21798- * The large cushion allows instructions like enter
21799- * and pusha to work. ("enter $65535, $31" pushes
21800- * 32 pointers and then decrements %sp by 65535.)
21801- */
21802- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21803- bad_area(regs, error_code, address);
21804- return;
21805- }
21806+ /*
21807+ * Accessing the stack below %sp is always a bug.
21808+ * The large cushion allows instructions like enter
21809+ * and pusha to work. ("enter $65535, $31" pushes
21810+ * 32 pointers and then decrements %sp by 65535.)
21811+ */
21812+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21813+ bad_area(regs, error_code, address);
21814+ return;
df50ba0c 21815 }
58c5fc13
MT
21816+
21817+#ifdef CONFIG_PAX_SEGMEXEC
21818+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21819+ bad_area(regs, error_code, address);
21820+ return;
df50ba0c 21821+ }
58c5fc13
MT
21822+#endif
21823+
21824 if (unlikely(expand_stack(vma, address))) {
21825 bad_area(regs, error_code, address);
21826 return;
6e9df6a3 21827@@ -1193,3 +1410,240 @@ good_area:
58c5fc13
MT
21828
21829 up_read(&mm->mmap_sem);
21830 }
21831+
6e9df6a3
MT
21832+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21833+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21834+{
21835+ struct mm_struct *mm = current->mm;
21836+ unsigned long ip = regs->ip;
21837+
21838+ if (v8086_mode(regs))
21839+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21840+
21841+#ifdef CONFIG_PAX_PAGEEXEC
21842+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21843+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21844+ return true;
21845+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21846+ return true;
21847+ return false;
21848+ }
21849+#endif
21850+
21851+#ifdef CONFIG_PAX_SEGMEXEC
21852+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21853+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21854+ return true;
21855+ return false;
21856+ }
21857+#endif
21858+
21859+ return false;
21860+}
21861+#endif
21862+
58c5fc13
MT
21863+#ifdef CONFIG_PAX_EMUTRAMP
21864+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21865+{
21866+ int err;
21867+
21868+ do { /* PaX: gcc trampoline emulation #1 */
21869+ unsigned char mov1, mov2;
21870+ unsigned short jmp;
21871+ unsigned int addr1, addr2;
21872+
21873+#ifdef CONFIG_X86_64
21874+ if ((regs->ip + 11) >> 32)
21875+ break;
21876+#endif
21877+
21878+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21879+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21880+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21881+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21882+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21883+
21884+ if (err)
21885+ break;
21886+
21887+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21888+ regs->cx = addr1;
21889+ regs->ax = addr2;
21890+ regs->ip = addr2;
21891+ return 2;
21892+ }
21893+ } while (0);
21894+
21895+ do { /* PaX: gcc trampoline emulation #2 */
21896+ unsigned char mov, jmp;
21897+ unsigned int addr1, addr2;
21898+
21899+#ifdef CONFIG_X86_64
21900+ if ((regs->ip + 9) >> 32)
21901+ break;
21902+#endif
21903+
21904+ err = get_user(mov, (unsigned char __user *)regs->ip);
21905+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21906+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21907+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21908+
21909+ if (err)
21910+ break;
21911+
21912+ if (mov == 0xB9 && jmp == 0xE9) {
21913+ regs->cx = addr1;
21914+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21915+ return 2;
21916+ }
21917+ } while (0);
21918+
21919+ return 1; /* PaX in action */
21920+}
21921+
21922+#ifdef CONFIG_X86_64
21923+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21924+{
21925+ int err;
21926+
21927+ do { /* PaX: gcc trampoline emulation #1 */
21928+ unsigned short mov1, mov2, jmp1;
21929+ unsigned char jmp2;
21930+ unsigned int addr1;
21931+ unsigned long addr2;
21932+
21933+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21934+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21935+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21936+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21937+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21938+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21939+
21940+ if (err)
21941+ break;
21942+
21943+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21944+ regs->r11 = addr1;
21945+ regs->r10 = addr2;
21946+ regs->ip = addr1;
21947+ return 2;
21948+ }
21949+ } while (0);
21950+
21951+ do { /* PaX: gcc trampoline emulation #2 */
21952+ unsigned short mov1, mov2, jmp1;
21953+ unsigned char jmp2;
21954+ unsigned long addr1, addr2;
21955+
21956+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21957+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21958+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21959+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21960+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21961+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21962+
21963+ if (err)
21964+ break;
21965+
21966+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21967+ regs->r11 = addr1;
21968+ regs->r10 = addr2;
21969+ regs->ip = addr1;
21970+ return 2;
21971+ }
21972+ } while (0);
21973+
21974+ return 1; /* PaX in action */
21975+}
21976+#endif
21977+
21978+/*
21979+ * PaX: decide what to do with offenders (regs->ip = fault address)
21980+ *
21981+ * returns 1 when task should be killed
21982+ * 2 when gcc trampoline was detected
21983+ */
21984+static int pax_handle_fetch_fault(struct pt_regs *regs)
21985+{
21986+ if (v8086_mode(regs))
21987+ return 1;
21988+
21989+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21990+ return 1;
21991+
21992+#ifdef CONFIG_X86_32
21993+ return pax_handle_fetch_fault_32(regs);
21994+#else
21995+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21996+ return pax_handle_fetch_fault_32(regs);
21997+ else
21998+ return pax_handle_fetch_fault_64(regs);
21999+#endif
22000+}
22001+#endif
22002+
22003+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
6e9df6a3 22004+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
58c5fc13
MT
22005+{
22006+ long i;
22007+
22008+ printk(KERN_ERR "PAX: bytes at PC: ");
22009+ for (i = 0; i < 20; i++) {
22010+ unsigned char c;
6e9df6a3 22011+ if (get_user(c, (unsigned char __force_user *)pc+i))
58c5fc13
MT
22012+ printk(KERN_CONT "?? ");
22013+ else
22014+ printk(KERN_CONT "%02x ", c);
22015+ }
22016+ printk("\n");
22017+
22018+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
ae4e228f 22019+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
58c5fc13 22020+ unsigned long c;
6e9df6a3 22021+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
58c5fc13
MT
22022+#ifdef CONFIG_X86_32
22023+ printk(KERN_CONT "???????? ");
22024+#else
6e9df6a3
MT
22025+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22026+ printk(KERN_CONT "???????? ???????? ");
22027+ else
22028+ printk(KERN_CONT "???????????????? ");
58c5fc13 22029+#endif
6e9df6a3
MT
22030+ } else {
22031+#ifdef CONFIG_X86_64
22032+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22033+ printk(KERN_CONT "%08x ", (unsigned int)c);
22034+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22035+ } else
22036+#endif
22037+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22038+ }
58c5fc13
MT
22039+ }
22040+ printk("\n");
22041+}
22042+#endif
58c5fc13 22043+
ae4e228f
MT
22044+/**
22045+ * probe_kernel_write(): safely attempt to write to a location
22046+ * @dst: address to write to
22047+ * @src: pointer to the data that shall be written
22048+ * @size: size of the data chunk
22049+ *
22050+ * Safely write to address @dst from the buffer at @src. If a kernel fault
22051+ * happens, handle that and return -EFAULT.
22052+ */
22053+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22054+{
22055+ long ret;
22056+ mm_segment_t old_fs = get_fs();
22057+
22058+ set_fs(KERNEL_DS);
22059+ pagefault_disable();
22060+ pax_open_kernel();
6e9df6a3 22061+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
ae4e228f
MT
22062+ pax_close_kernel();
22063+ pagefault_enable();
22064+ set_fs(old_fs);
22065+
22066+ return ret ? -EFAULT : 0;
22067+}
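
A hedged usage sketch for the helper above (the caller below is illustrative, not part of the patch): writes routed through probe_kernel_write() come back as -EFAULT on a faulting destination instead of oopsing, and the pax_open_kernel()/pax_close_kernel() pair inside the helper lifts the KERNEXEC write protection only for the duration of the copy.

    static int patch_kernel_byte(void *dst, unsigned char val)
    {
            /* returns 0 on success, -EFAULT if the destination faulted */
            return probe_kernel_write(dst, &val, sizeof(val)) ? -EFAULT : 0;
    }
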
fe2de317
MT
22068diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22069index ea30585..7d26398 100644
22070--- a/arch/x86/mm/gup.c
22071+++ b/arch/x86/mm/gup.c
22072@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
22073 do {
22074 VM_BUG_ON(compound_head(page) != head);
22075 pages[*nr] = page;
22076+ if (PageTail(page))
22077+ get_huge_page_tail(page);
22078 (*nr)++;
22079 page++;
22080 refs++;
22081@@ -253,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
ae4e228f
MT
22082 addr = start;
22083 len = (unsigned long) nr_pages << PAGE_SHIFT;
22084 end = start + len;
22085- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22086+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22087 (void __user *)start, len)))
22088 return 0;
58c5fc13 22089
fe2de317
MT
22090diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22091index f4f29b1..5cac4fb 100644
22092--- a/arch/x86/mm/highmem_32.c
22093+++ b/arch/x86/mm/highmem_32.c
22094@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
58c5fc13
MT
22095 idx = type + KM_TYPE_NR*smp_processor_id();
22096 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22097 BUG_ON(!pte_none(*(kmap_pte-idx)));
22098+
ae4e228f 22099+ pax_open_kernel();
58c5fc13 22100 set_pte(kmap_pte-idx, mk_pte(page, prot));
ae4e228f 22101+ pax_close_kernel();
fe2de317
MT
22102+
22103 arch_flush_lazy_mmu_mode();
58c5fc13 22104
58c5fc13 22105 return (void *)vaddr;
fe2de317
MT
22106diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22107index f581a18..29efd37 100644
22108--- a/arch/x86/mm/hugetlbpage.c
22109+++ b/arch/x86/mm/hugetlbpage.c
22110@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
58c5fc13
MT
22111 struct hstate *h = hstate_file(file);
22112 struct mm_struct *mm = current->mm;
22113 struct vm_area_struct *vma;
22114- unsigned long start_addr;
22115+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22116+
22117+#ifdef CONFIG_PAX_SEGMEXEC
22118+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22119+ pax_task_size = SEGMEXEC_TASK_SIZE;
22120+#endif
6892158b
MT
22121+
22122+ pax_task_size -= PAGE_SIZE;
58c5fc13
MT
22123
22124 if (len > mm->cached_hole_size) {
22125- start_addr = mm->free_area_cache;
22126+ start_addr = mm->free_area_cache;
22127 } else {
22128- start_addr = TASK_UNMAPPED_BASE;
22129- mm->cached_hole_size = 0;
22130+ start_addr = mm->mmap_base;
22131+ mm->cached_hole_size = 0;
22132 }
22133
22134 full_search:
6892158b 22135@@ -280,26 +287,27 @@ full_search:
58c5fc13
MT
22136
22137 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22138 /* At this point: (!vma || addr < vma->vm_end). */
22139- if (TASK_SIZE - len < addr) {
22140+ if (pax_task_size - len < addr) {
22141 /*
22142 * Start a new search - just in case we missed
22143 * some holes.
22144 */
22145- if (start_addr != TASK_UNMAPPED_BASE) {
22146- start_addr = TASK_UNMAPPED_BASE;
22147+ if (start_addr != mm->mmap_base) {
22148+ start_addr = mm->mmap_base;
22149 mm->cached_hole_size = 0;
22150 goto full_search;
22151 }
57199397
MT
22152 return -ENOMEM;
22153 }
22154- if (!vma || addr + len <= vma->vm_start) {
22155- mm->free_area_cache = addr + len;
22156- return addr;
22157- }
22158+ if (check_heap_stack_gap(vma, addr, len))
22159+ break;
22160 if (addr + mm->cached_hole_size < vma->vm_start)
22161 mm->cached_hole_size = vma->vm_start - addr;
22162 addr = ALIGN(vma->vm_end, huge_page_size(h));
22163 }
22164+
22165+ mm->free_area_cache = addr + len;
22166+ return addr;
22167 }
22168
22169 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
fe2de317 22170@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
57199397 22171 {
58c5fc13
MT
22172 struct hstate *h = hstate_file(file);
22173 struct mm_struct *mm = current->mm;
57199397 22174- struct vm_area_struct *vma, *prev_vma;
58c5fc13 22175- unsigned long base = mm->mmap_base, addr = addr0;
57199397 22176+ struct vm_area_struct *vma;
58c5fc13
MT
22177+ unsigned long base = mm->mmap_base, addr;
22178 unsigned long largest_hole = mm->cached_hole_size;
22179- int first_time = 1;
22180
22181 /* don't allow allocations above current base */
22182 if (mm->free_area_cache > base)
fe2de317 22183@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
58c5fc13
MT
22184 largest_hole = 0;
22185 mm->free_area_cache = base;
22186 }
22187-try_again:
22188+
22189 /* make sure it can fit in the remaining address space */
22190 if (mm->free_area_cache < len)
22191 goto fail;
16454cff 22192
66a7e928 22193 /* either no address requested or can't fit in requested address hole */
16454cff
MT
22194- addr = (mm->free_area_cache - len) & huge_page_mask(h);
22195+ addr = (mm->free_area_cache - len);
57199397 22196 do {
16454cff 22197+ addr &= huge_page_mask(h);
57199397
MT
22198+ vma = find_vma(mm, addr);
22199 /*
22200 * Lookup failure means no vma is above this address,
22201 * i.e. return with success:
22202- */
22203- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22204- return addr;
22205-
22206- /*
22207 * new region fits between prev_vma->vm_end and
22208 * vma->vm_start, use it:
22209 */
22210- if (addr + len <= vma->vm_start &&
22211- (!prev_vma || (addr >= prev_vma->vm_end))) {
22212+ if (check_heap_stack_gap(vma, addr, len)) {
22213 /* remember the address as a hint for next time */
22214- mm->cached_hole_size = largest_hole;
22215- return (mm->free_area_cache = addr);
22216- } else {
22217- /* pull free_area_cache down to the first hole */
22218- if (mm->free_area_cache == vma->vm_end) {
22219- mm->free_area_cache = vma->vm_start;
22220- mm->cached_hole_size = largest_hole;
22221- }
22222+ mm->cached_hole_size = largest_hole;
22223+ return (mm->free_area_cache = addr);
22224+ }
22225+ /* pull free_area_cache down to the first hole */
22226+ if (mm->free_area_cache == vma->vm_end) {
22227+ mm->free_area_cache = vma->vm_start;
22228+ mm->cached_hole_size = largest_hole;
22229 }
22230
22231 /* remember the largest hole we saw so far */
22232 if (addr + largest_hole < vma->vm_start)
22233- largest_hole = vma->vm_start - addr;
22234+ largest_hole = vma->vm_start - addr;
22235
22236 /* try just below the current vma->vm_start */
16454cff
MT
22237- addr = (vma->vm_start - len) & huge_page_mask(h);
22238- } while (len <= vma->vm_start);
22239+ addr = skip_heap_stack_gap(vma, len);
22240+ } while (!IS_ERR_VALUE(addr));
58c5fc13
MT
22241
22242 fail:
22243 /*
22244- * if hint left us with no space for the requested
22245- * mapping then try again:
22246- */
22247- if (first_time) {
22248- mm->free_area_cache = base;
22249- largest_hole = 0;
22250- first_time = 0;
22251- goto try_again;
22252- }
22253- /*
22254 * A failed mmap() very likely causes application failure,
22255 * so fall back to the bottom-up function here. This scenario
22256 * can happen with large stack limits and large mmap()
22257 * allocations.
22258 */
22259- mm->free_area_cache = TASK_UNMAPPED_BASE;
22260+
22261+#ifdef CONFIG_PAX_SEGMEXEC
22262+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22263+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22264+ else
22265+#endif
22266+
22267+ mm->mmap_base = TASK_UNMAPPED_BASE;
22268+
22269+#ifdef CONFIG_PAX_RANDMMAP
22270+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22271+ mm->mmap_base += mm->delta_mmap;
22272+#endif
22273+
22274+ mm->free_area_cache = mm->mmap_base;
22275 mm->cached_hole_size = ~0UL;
22276 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22277 len, pgoff, flags);
16454cff 22278@@ -386,6 +392,7 @@ fail:
58c5fc13
MT
22279 /*
22280 * Restore the topdown base:
22281 */
22282+ mm->mmap_base = base;
22283 mm->free_area_cache = base;
22284 mm->cached_hole_size = ~0UL;
22285
fe2de317 22286@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58c5fc13
MT
22287 struct hstate *h = hstate_file(file);
22288 struct mm_struct *mm = current->mm;
22289 struct vm_area_struct *vma;
22290+ unsigned long pax_task_size = TASK_SIZE;
22291
22292 if (len & ~huge_page_mask(h))
22293 return -EINVAL;
22294- if (len > TASK_SIZE)
22295+
22296+#ifdef CONFIG_PAX_SEGMEXEC
22297+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22298+ pax_task_size = SEGMEXEC_TASK_SIZE;
22299+#endif
22300+
6892158b
MT
22301+ pax_task_size -= PAGE_SIZE;
22302+
58c5fc13
MT
22303+ if (len > pax_task_size)
22304 return -ENOMEM;
22305
22306 if (flags & MAP_FIXED) {
fe2de317 22307@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58c5fc13
MT
22308 if (addr) {
22309 addr = ALIGN(addr, huge_page_size(h));
22310 vma = find_vma(mm, addr);
22311- if (TASK_SIZE - len >= addr &&
57199397
MT
22312- (!vma || addr + len <= vma->vm_start))
22313+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
58c5fc13
MT
22314 return addr;
22315 }
57199397 22316 if (mm->get_unmapped_area == arch_get_unmapped_area)
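
The hugetlbpage.c rewrite above replaces the bare "addr + len <= vma->vm_start" fit test with check_heap_stack_gap()/skip_heap_stack_gap(), whose definitions live elsewhere in the patch. The stand-alone model below illustrates the assumed idea, that a candidate range must also leave a guard gap below the next mapping; the gap size and helper shapes here are illustrative only.

    #include <stdio.h>

    struct vma { unsigned long vm_start, vm_end; };

    #define GUARD_GAP 0x10000UL   /* illustrative guard size */

    static int fits_with_gap(const struct vma *next, unsigned long addr, unsigned long len)
    {
            /* accept only if the request plus the guard gap ends below the next mapping */
            return !next || addr + len + GUARD_GAP <= next->vm_start;
    }

    int main(void)
    {
            struct vma stack = { 0x7f0000UL, 0x800000UL };  /* pretend stack vma */
            unsigned long len = 0x20000UL;
            unsigned long addr = stack.vm_start - len;      /* naive candidate just below it */

            if (!fits_with_gap(&stack, addr, len))
                    addr = stack.vm_start - len - GUARD_GAP;  /* step below the guard gap */

            printf("placed at %#lx\n", addr);
            return 0;
    }
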
fe2de317
MT
22317diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22318index 87488b9..7129f32 100644
22319--- a/arch/x86/mm/init.c
22320+++ b/arch/x86/mm/init.c
22321@@ -31,7 +31,7 @@ int direct_gbpages
22322 static void __init find_early_table_space(unsigned long end, int use_pse,
22323 int use_gbpages)
22324 {
22325- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22326+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22327 phys_addr_t base;
22328
22329 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22330@@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22331 */
22332 int devmem_is_allowed(unsigned long pagenr)
22333 {
22334+#ifdef CONFIG_GRKERNSEC_KMEM
22335+ /* allow BDA */
22336+ if (!pagenr)
22337+ return 1;
22338+ /* allow EBDA */
22339+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22340+ return 1;
22341+#else
22342+ if (!pagenr)
22343+ return 1;
22344+#ifdef CONFIG_VM86
22345+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22346+ return 1;
22347+#endif
22348+#endif
22349+
22350+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22351+ return 1;
22352+#ifdef CONFIG_GRKERNSEC_KMEM
22353+ /* throw out everything else below 1MB */
22354 if (pagenr <= 256)
22355- return 1;
22356+ return 0;
22357+#endif
22358 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22359 return 0;
22360 if (!page_is_ram(pagenr))
22361@@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22362
22363 void free_initmem(void)
22364 {
22365+
22366+#ifdef CONFIG_PAX_KERNEXEC
22367+#ifdef CONFIG_X86_32
22368+ /* PaX: limit KERNEL_CS to actual size */
22369+ unsigned long addr, limit;
22370+ struct desc_struct d;
22371+ int cpu;
22372+
22373+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22374+ limit = (limit - 1UL) >> PAGE_SHIFT;
22375+
22376+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22377+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22378+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22379+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22380+ }
22381+
22382+ /* PaX: make KERNEL_CS read-only */
22383+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22384+ if (!paravirt_enabled())
22385+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22386+/*
22387+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22388+ pgd = pgd_offset_k(addr);
22389+ pud = pud_offset(pgd, addr);
22390+ pmd = pmd_offset(pud, addr);
22391+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22392+ }
22393+*/
22394+#ifdef CONFIG_X86_PAE
22395+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22396+/*
22397+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22398+ pgd = pgd_offset_k(addr);
22399+ pud = pud_offset(pgd, addr);
22400+ pmd = pmd_offset(pud, addr);
22401+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22402+ }
22403+*/
22404+#endif
22405+
22406+#ifdef CONFIG_MODULES
22407+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22408+#endif
22409+
22410+#else
22411+ pgd_t *pgd;
22412+ pud_t *pud;
22413+ pmd_t *pmd;
22414+ unsigned long addr, end;
22415+
22416+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22417+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22418+ pgd = pgd_offset_k(addr);
22419+ pud = pud_offset(pgd, addr);
22420+ pmd = pmd_offset(pud, addr);
22421+ if (!pmd_present(*pmd))
22422+ continue;
22423+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22424+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22425+ else
22426+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22427+ }
22428+
22429+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22430+ end = addr + KERNEL_IMAGE_SIZE;
22431+ for (; addr < end; addr += PMD_SIZE) {
22432+ pgd = pgd_offset_k(addr);
22433+ pud = pud_offset(pgd, addr);
22434+ pmd = pmd_offset(pud, addr);
22435+ if (!pmd_present(*pmd))
22436+ continue;
22437+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22438+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22439+ }
22440+#endif
22441+
22442+ flush_tlb_all();
22443+#endif
22444+
22445 free_init_pages("unused kernel memory",
22446 (unsigned long)(&__init_begin),
22447 (unsigned long)(&__init_end));
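
The devmem_is_allowed() hunk above tightens /dev/mem under CONFIG_GRKERNSEC_KMEM so that, below 1 MiB, only the BIOS data area, the EBDA page and the ISA hole stay readable. The stand-alone model below reproduces just that low-memory classification; the iomem_is_exclusive()/page_is_ram() checks of the real function are left out.

    #include <stdio.h>

    #define PAGE_SHIFT        12
    #define ISA_START_ADDRESS 0xa0000UL
    #define ISA_END_ADDRESS   0x100000UL

    static int devmem_allowed_below_1m(unsigned long pagenr)
    {
            if (!pagenr)                                    /* BDA */
                    return 1;
            if (pagenr == (0x9f000UL >> PAGE_SHIFT))        /* EBDA */
                    return 1;
            if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
                pagenr <  (ISA_END_ADDRESS >> PAGE_SHIFT))  /* ISA hole */
                    return 1;
            return 0;                                       /* everything else below 1 MiB */
    }

    int main(void)
    {
            printf("page 0x00: %d\n", devmem_allowed_below_1m(0x00));
            printf("page 0x9f: %d\n", devmem_allowed_below_1m(0x9f));
            printf("page 0xa0: %d\n", devmem_allowed_below_1m(0xa0));
            printf("page 0x50: %d\n", devmem_allowed_below_1m(0x50));
            return 0;
    }
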
22448diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22449index 29f7c6d..b46b35b 100644
22450--- a/arch/x86/mm/init_32.c
22451+++ b/arch/x86/mm/init_32.c
16454cff 22452@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
58c5fc13
MT
22453 }
22454
22455 /*
22456- * Creates a middle page table and puts a pointer to it in the
22457- * given global directory entry. This only returns the gd entry
22458- * in non-PAE compilation mode, since the middle layer is folded.
22459- */
22460-static pmd_t * __init one_md_table_init(pgd_t *pgd)
22461-{
22462- pud_t *pud;
22463- pmd_t *pmd_table;
22464-
22465-#ifdef CONFIG_X86_PAE
22466- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22467- if (after_bootmem)
ae4e228f 22468- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
58c5fc13
MT
22469- else
22470- pmd_table = (pmd_t *)alloc_low_page();
22471- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22472- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22473- pud = pud_offset(pgd, 0);
22474- BUG_ON(pmd_table != pmd_offset(pud, 0));
22475-
22476- return pmd_table;
22477- }
22478-#endif
22479- pud = pud_offset(pgd, 0);
22480- pmd_table = pmd_offset(pud, 0);
22481-
22482- return pmd_table;
22483-}
22484-
22485-/*
22486 * Create a page table and place a pointer to it in a middle page
22487 * directory entry:
22488 */
fe2de317 22489@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
58c5fc13
MT
22490 page_table = (pte_t *)alloc_low_page();
22491
22492 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22493+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22494+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22495+#else
22496 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22497+#endif
22498 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22499 }
22500
22501 return pte_offset_kernel(pmd, 0);
22502 }
22503
22504+static pmd_t * __init one_md_table_init(pgd_t *pgd)
22505+{
22506+ pud_t *pud;
22507+ pmd_t *pmd_table;
22508+
22509+ pud = pud_offset(pgd, 0);
22510+ pmd_table = pmd_offset(pud, 0);
22511+
22512+ return pmd_table;
22513+}
22514+
22515 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22516 {
22517 int pgd_idx = pgd_index(vaddr);
fe2de317 22518@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
58c5fc13
MT
22519 int pgd_idx, pmd_idx;
22520 unsigned long vaddr;
22521 pgd_t *pgd;
22522+ pud_t *pud;
22523 pmd_t *pmd;
22524 pte_t *pte = NULL;
22525
fe2de317 22526@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
58c5fc13
MT
22527 pgd = pgd_base + pgd_idx;
22528
22529 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22530- pmd = one_md_table_init(pgd);
22531- pmd = pmd + pmd_index(vaddr);
22532+ pud = pud_offset(pgd, vaddr);
22533+ pmd = pmd_offset(pud, vaddr);
22534+
22535+#ifdef CONFIG_X86_PAE
22536+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22537+#endif
22538+
22539 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22540 pmd++, pmd_idx++) {
22541 pte = page_table_kmap_check(one_page_table_init(pmd),
fe2de317 22542@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
58c5fc13
MT
22543 }
22544 }
22545
22546-static inline int is_kernel_text(unsigned long addr)
22547+static inline int is_kernel_text(unsigned long start, unsigned long end)
22548 {
16454cff 22549- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
58c5fc13
MT
22550- return 1;
22551- return 0;
ae4e228f 22552+ if ((start > ktla_ktva((unsigned long)_etext) ||
58c5fc13
MT
22553+ end <= ktla_ktva((unsigned long)_stext)) &&
22554+ (start > ktla_ktva((unsigned long)_einittext) ||
22555+ end <= ktla_ktva((unsigned long)_sinittext)) &&
ae4e228f
MT
22556+
22557+#ifdef CONFIG_ACPI_SLEEP
22558+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22559+#endif
22560+
58c5fc13
MT
22561+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22562+ return 0;
22563+ return 1;
22564 }
22565
22566 /*
fe2de317 22567@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
df50ba0c 22568 unsigned long last_map_addr = end;
58c5fc13
MT
22569 unsigned long start_pfn, end_pfn;
22570 pgd_t *pgd_base = swapper_pg_dir;
22571- int pgd_idx, pmd_idx, pte_ofs;
22572+ unsigned int pgd_idx, pmd_idx, pte_ofs;
22573 unsigned long pfn;
22574 pgd_t *pgd;
22575+ pud_t *pud;
22576 pmd_t *pmd;
22577 pte_t *pte;
22578 unsigned pages_2m, pages_4k;
16454cff 22579@@ -281,8 +282,13 @@ repeat:
58c5fc13
MT
22580 pfn = start_pfn;
22581 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22582 pgd = pgd_base + pgd_idx;
22583- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22584- pmd = one_md_table_init(pgd);
22585+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22586+ pud = pud_offset(pgd, 0);
22587+ pmd = pmd_offset(pud, 0);
22588+
22589+#ifdef CONFIG_X86_PAE
22590+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22591+#endif
22592
22593 if (pfn >= end_pfn)
22594 continue;
16454cff 22595@@ -294,14 +300,13 @@ repeat:
58c5fc13
MT
22596 #endif
22597 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22598 pmd++, pmd_idx++) {
22599- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22600+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22601
22602 /*
22603 * Map with big pages if possible, otherwise
22604 * create normal page tables:
22605 */
22606 if (use_pse) {
22607- unsigned int addr2;
22608 pgprot_t prot = PAGE_KERNEL_LARGE;
22609 /*
22610 * first pass will use the same initial
16454cff 22611@@ -311,11 +316,7 @@ repeat:
58c5fc13
MT
22612 __pgprot(PTE_IDENT_ATTR |
22613 _PAGE_PSE);
22614
22615- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22616- PAGE_OFFSET + PAGE_SIZE-1;
22617-
22618- if (is_kernel_text(addr) ||
22619- is_kernel_text(addr2))
22620+ if (is_kernel_text(address, address + PMD_SIZE))
22621 prot = PAGE_KERNEL_LARGE_EXEC;
22622
22623 pages_2m++;
16454cff 22624@@ -332,7 +333,7 @@ repeat:
58c5fc13
MT
22625 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22626 pte += pte_ofs;
22627 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22628- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22629+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22630 pgprot_t prot = PAGE_KERNEL;
22631 /*
22632 * first pass will use the same initial
16454cff 22633@@ -340,7 +341,7 @@ repeat:
58c5fc13
MT
22634 */
22635 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22636
22637- if (is_kernel_text(addr))
22638+ if (is_kernel_text(address, address + PAGE_SIZE))
22639 prot = PAGE_KERNEL_EXEC;
22640
22641 pages_4k++;
fe2de317 22642@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
58c5fc13
MT
22643
22644 pud = pud_offset(pgd, va);
22645 pmd = pmd_offset(pud, va);
22646- if (!pmd_present(*pmd))
22647+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22648 break;
22649
22650 pte = pte_offset_kernel(pmd, va);
fe2de317 22651@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
58c5fc13
MT
22652
22653 static void __init pagetable_init(void)
22654 {
22655- pgd_t *pgd_base = swapper_pg_dir;
22656-
22657- permanent_kmaps_init(pgd_base);
22658+ permanent_kmaps_init(swapper_pg_dir);
22659 }
22660
58c5fc13
MT
22661-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22662+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22663 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22664
22665 /* user-defined highmem size */
15a11c5b 22666@@ -757,6 +756,12 @@ void __init mem_init(void)
df50ba0c
MT
22667
22668 pci_iommu_alloc();
22669
22670+#ifdef CONFIG_PAX_PER_CPU_PGD
22671+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22672+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22673+ KERNEL_PGD_PTRS);
22674+#endif
22675+
22676 #ifdef CONFIG_FLATMEM
22677 BUG_ON(!mem_map);
22678 #endif
15a11c5b 22679@@ -774,7 +779,7 @@ void __init mem_init(void)
58c5fc13
MT
22680 set_highmem_pages_init();
22681
22682 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22683- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22684+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22685 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22686
ae4e228f 22687 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
15a11c5b 22688@@ -815,10 +820,10 @@ void __init mem_init(void)
58c5fc13
MT
22689 ((unsigned long)&__init_end -
22690 (unsigned long)&__init_begin) >> 10,
22691
22692- (unsigned long)&_etext, (unsigned long)&_edata,
22693- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22694+ (unsigned long)&_sdata, (unsigned long)&_edata,
22695+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22696
22697- (unsigned long)&_text, (unsigned long)&_etext,
22698+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22699 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22700
22701 /*
15a11c5b 22702@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
ae4e228f
MT
22703 if (!kernel_set_to_readonly)
22704 return;
58c5fc13 22705
ae4e228f
MT
22706+ start = ktla_ktva(start);
22707 pr_debug("Set kernel text: %lx - %lx for read write\n",
22708 start, start+size);
22709
15a11c5b 22710@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
ae4e228f
MT
22711 if (!kernel_set_to_readonly)
22712 return;
22713
22714+ start = ktla_ktva(start);
22715 pr_debug("Set kernel text: %lx - %lx for read only\n",
22716 start, start+size);
22717
15a11c5b 22718@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
ae4e228f
MT
22719 unsigned long start = PFN_ALIGN(_text);
22720 unsigned long size = PFN_ALIGN(_etext) - start;
22721
22722+ start = ktla_ktva(start);
22723 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22724 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22725 size >> 10);
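
Several hunks above (free_initmem(), is_kernel_text(), set_kernel_text_ro(), mark_rodata_ro()) pass kernel text addresses through ktla_ktva() or its inverse ktva_ktla() before changing protections. Those helpers are defined elsewhere in the patch; the model below only captures the assumed intent, translating between the kernel text's linear address and the alias it is actually executed from under KERNEXEC, using a placeholder offset.

    #include <stdio.h>

    #define FAKE_TEXT_OFFSET 0x400000UL   /* placeholder shift, not the real value */

    static unsigned long ktla_ktva_model(unsigned long addr) { return addr + FAKE_TEXT_OFFSET; }
    static unsigned long ktva_ktla_model(unsigned long addr) { return addr - FAKE_TEXT_OFFSET; }

    int main(void)
    {
            unsigned long lin = 0xc1000000UL;   /* pretend linear text address */
            unsigned long vir = ktla_ktva_model(lin);

            printf("%#lx <-> %#lx (round trip %#lx)\n", lin, vir, ktva_ktla_model(vir));
            return 0;
    }
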
fe2de317
MT
22726diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22727index bbaaa00..16dffad 100644
22728--- a/arch/x86/mm/init_64.c
22729+++ b/arch/x86/mm/init_64.c
22730@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
ae4e228f
MT
22731 * around without checking the pgd every time.
22732 */
22733
22734-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22735+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22736 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22737
22738 int force_personality32;
fe2de317 22739@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
16454cff 22740
bc901d79
MT
22741 for (address = start; address <= end; address += PGDIR_SIZE) {
22742 const pgd_t *pgd_ref = pgd_offset_k(address);
bc901d79
MT
22743+
22744+#ifdef CONFIG_PAX_PER_CPU_PGD
22745+ unsigned long cpu;
22746+#else
22747 struct page *page;
22748+#endif
22749
22750 if (pgd_none(*pgd_ref))
22751 continue;
22752
16454cff 22753 spin_lock(&pgd_lock);
bc901d79
MT
22754+
22755+#ifdef CONFIG_PAX_PER_CPU_PGD
22756+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22757+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
22758+#else
22759 list_for_each_entry(page, &pgd_list, lru) {
22760 pgd_t *pgd;
22761 spinlock_t *pgt_lock;
fe2de317 22762@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
16454cff 22763 /* the pgt_lock only for Xen */
bc901d79
MT
22764 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22765 spin_lock(pgt_lock);
22766+#endif
22767
22768 if (pgd_none(*pgd))
22769 set_pgd(pgd, *pgd_ref);
fe2de317 22770@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
bc901d79
MT
22771 BUG_ON(pgd_page_vaddr(*pgd)
22772 != pgd_page_vaddr(*pgd_ref));
22773
22774+#ifndef CONFIG_PAX_PER_CPU_PGD
22775 spin_unlock(pgt_lock);
22776+#endif
22777+
22778 }
16454cff 22779 spin_unlock(&pgd_lock);
bc901d79 22780 }
fe2de317 22781@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
58c5fc13
MT
22782 pmd = fill_pmd(pud, vaddr);
22783 pte = fill_pte(pmd, vaddr);
22784
ae4e228f 22785+ pax_open_kernel();
58c5fc13 22786 set_pte(pte, new_pte);
ae4e228f 22787+ pax_close_kernel();
58c5fc13 22788
58c5fc13
MT
22789 /*
22790 * It's enough to flush this one mapping.
fe2de317 22791@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
58c5fc13
MT
22792 pgd = pgd_offset_k((unsigned long)__va(phys));
22793 if (pgd_none(*pgd)) {
22794 pud = (pud_t *) spp_getpage();
22795- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22796- _PAGE_USER));
22797+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22798 }
22799 pud = pud_offset(pgd, (unsigned long)__va(phys));
22800 if (pud_none(*pud)) {
22801 pmd = (pmd_t *) spp_getpage();
22802- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22803- _PAGE_USER));
22804+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22805 }
22806 pmd = pmd_offset(pud, phys);
22807 BUG_ON(!pmd_none(*pmd));
fe2de317 22808@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
6e9df6a3
MT
22809 if (pfn >= pgt_buf_top)
22810 panic("alloc_low_page: ran out of memory");
22811
22812- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22813+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22814 clear_page(adr);
22815 *phys = pfn * PAGE_SIZE;
22816 return adr;
fe2de317 22817@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
6e9df6a3
MT
22818
22819 phys = __pa(virt);
22820 left = phys & (PAGE_SIZE - 1);
22821- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22822+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22823 adr = (void *)(((unsigned long)adr) | left);
22824
22825 return adr;
15a11c5b 22826@@ -693,6 +707,12 @@ void __init mem_init(void)
df50ba0c
MT
22827
22828 pci_iommu_alloc();
22829
22830+#ifdef CONFIG_PAX_PER_CPU_PGD
22831+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22832+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22833+ KERNEL_PGD_PTRS);
22834+#endif
22835+
22836 /* clear_bss() already clear the empty_zero_page */
22837
22838 reservedpages = 0;
15a11c5b 22839@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
58c5fc13
MT
22840 static struct vm_area_struct gate_vma = {
22841 .vm_start = VSYSCALL_START,
22842 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22843- .vm_page_prot = PAGE_READONLY_EXEC,
22844- .vm_flags = VM_READ | VM_EXEC
22845+ .vm_page_prot = PAGE_READONLY,
22846+ .vm_flags = VM_READ
22847 };
22848
66a7e928 22849 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
fe2de317 22850@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
58c5fc13
MT
22851
22852 const char *arch_vma_name(struct vm_area_struct *vma)
22853 {
22854- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22855+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22856 return "[vdso]";
22857 if (vma == &gate_vma)
22858 return "[vsyscall]";
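
Both mem_init() hunks clone the kernel half of swapper_pg_dir into a per-CPU page directory, and sync_global_pgds() is taught to walk every CPU's copy instead of the pgd_list when CONFIG_PAX_PER_CPU_PGD is set. The toy model below shows the clone-then-sync bookkeeping this implies; sizes and flag values are illustrative and it is a sketch, not the kernel code.

/* percpu_pgd_demo.c - simplified model of per-CPU page directories:
 * each CPU owns a private PGD whose kernel half is a copy of
 * swapper_pg_dir, so later kernel PGD updates must be replicated. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PTRS_PER_PGD        512
#define KERNEL_PGD_BOUNDARY 256   /* first kernel-space slot (illustrative) */
#define KERNEL_PGD_PTRS     (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
#define NR_CPUS             4

static uint64_t swapper_pg_dir[PTRS_PER_PGD];
static uint64_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];

/* analogue of clone_pgd_range(): a plain copy of PGD slots */
static void clone_pgd_range(uint64_t *dst, const uint64_t *src, int count)
{
    memcpy(dst, src, count * sizeof(*dst));
}

/* analogue of sync_global_pgds(): push one new kernel entry to every CPU */
static void sync_global_pgd(int index, uint64_t entry)
{
    swapper_pg_dir[index] = entry;
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        cpu_pgd[cpu][index] = entry;
}

int main(void)
{
    swapper_pg_dir[KERNEL_PGD_BOUNDARY] = 0x1000 | 0x63;      /* fake kernel mapping */

    for (int cpu = 0; cpu < NR_CPUS; cpu++)                    /* boot-time clone */
        clone_pgd_range(cpu_pgd[cpu] + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);

    sync_global_pgd(KERNEL_PGD_BOUNDARY + 1, 0x2000 | 0x63);   /* later update */
    printf("cpu0 slot %d = %#llx\n", KERNEL_PGD_BOUNDARY + 1,
           (unsigned long long)cpu_pgd[0][KERNEL_PGD_BOUNDARY + 1]);
    return 0;
}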
fe2de317
MT
22859diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22860index 7b179b4..6bd1777 100644
22861--- a/arch/x86/mm/iomap_32.c
22862+++ b/arch/x86/mm/iomap_32.c
22863@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
bc901d79 22864 type = kmap_atomic_idx_push();
58c5fc13
MT
22865 idx = type + KM_TYPE_NR * smp_processor_id();
22866 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22867+
ae4e228f 22868+ pax_open_kernel();
58c5fc13 22869 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
ae4e228f 22870+ pax_close_kernel();
58c5fc13
MT
22871+
22872 arch_flush_lazy_mmu_mode();
22873
22874 return (void *)vaddr;
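
Here, as in many hunks of this patch, a PTE update is bracketed by pax_open_kernel()/pax_close_kernel() so that otherwise read-only page-table memory is writable only for the duration of the change. A minimal user-space analogy of that bracket, using mprotect() in place of the kernel's own mechanism and with invented names, looks like this:

/* open_close_demo.c - analogy of the open/modify/close pattern:
 * data normally lives read-only and is made writable only around
 * an update; mprotect() stands in for the kernel-side machinery. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static unsigned char *rodata;
static long pagesz;

static void open_kernel(void)  { mprotect(rodata, pagesz, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(rodata, pagesz, PROT_READ); }

int main(void)
{
    pagesz = sysconf(_SC_PAGESIZE);
    rodata = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (rodata == MAP_FAILED)
        return 1;
    strcpy((char *)rodata, "initial");
    mprotect(rodata, pagesz, PROT_READ);        /* now effectively "__read_only" */

    open_kernel();                              /* like pax_open_kernel() */
    strcpy((char *)rodata, "updated while briefly writable");
    close_kernel();                             /* like pax_close_kernel() */

    printf("%s\n", (char *)rodata);
    return 0;
}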
fe2de317
MT
22875diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22876index be1ef57..9680edc 100644
22877--- a/arch/x86/mm/ioremap.c
22878+++ b/arch/x86/mm/ioremap.c
22879@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
6892158b 22880 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
58c5fc13
MT
22881 int is_ram = page_is_ram(pfn);
22882
ae4e228f
MT
22883- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22884+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22885 return NULL;
22886 WARN_ON_ONCE(is_ram);
58c5fc13 22887 }
fe2de317 22888@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str)
58c5fc13
MT
22889 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22890
22891 static __initdata int after_paging_init;
22892-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22893+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22894
22895 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22896 {
bc901d79 22897@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
58c5fc13
MT
22898 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22899
22900 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22901- memset(bm_pte, 0, sizeof(bm_pte));
22902- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22903+ pmd_populate_user(&init_mm, pmd, bm_pte);
22904
22905 /*
22906 * The boot-ioremap range spans multiple pmds, for which
fe2de317
MT
22907diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
22908index d87dd6d..bf3fa66 100644
22909--- a/arch/x86/mm/kmemcheck/kmemcheck.c
22910+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
22911@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
ae4e228f
MT
22912 * memory (e.g. tracked pages)? For now, we need this to avoid
22913 * invoking kmemcheck for PnP BIOS calls.
22914 */
22915- if (regs->flags & X86_VM_MASK)
22916+ if (v8086_mode(regs))
22917 return false;
22918- if (regs->cs != __KERNEL_CS)
22919+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22920 return false;
22921
22922 pte = kmemcheck_pte_lookup(address);
fe2de317
MT
22923diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
22924index 1dab519..60a7e5f 100644
22925--- a/arch/x86/mm/mmap.c
22926+++ b/arch/x86/mm/mmap.c
22927@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
ae4e228f 22928 * Leave an at least ~128 MB hole with possible stack randomization.
58c5fc13 22929 */
ae4e228f 22930 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
58c5fc13
MT
22931-#define MAX_GAP (TASK_SIZE/6*5)
22932+#define MAX_GAP (pax_task_size/6*5)
22933
22934 /*
22935 * True on X86_32 or when emulating IA32 on X86_64
ae4e228f 22936@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
58c5fc13
MT
22937 return rnd << PAGE_SHIFT;
22938 }
22939
22940-static unsigned long mmap_base(void)
22941+static unsigned long mmap_base(struct mm_struct *mm)
22942 {
df50ba0c 22943 unsigned long gap = rlimit(RLIMIT_STACK);
58c5fc13
MT
22944+ unsigned long pax_task_size = TASK_SIZE;
22945+
22946+#ifdef CONFIG_PAX_SEGMEXEC
22947+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22948+ pax_task_size = SEGMEXEC_TASK_SIZE;
22949+#endif
22950
22951 if (gap < MIN_GAP)
22952 gap = MIN_GAP;
22953 else if (gap > MAX_GAP)
22954 gap = MAX_GAP;
22955
22956- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22957+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22958 }
22959
22960 /*
22961 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22962 * does, but not when emulating X86_32
22963 */
22964-static unsigned long mmap_legacy_base(void)
22965+static unsigned long mmap_legacy_base(struct mm_struct *mm)
22966 {
22967- if (mmap_is_ia32())
22968+ if (mmap_is_ia32()) {
22969+
22970+#ifdef CONFIG_PAX_SEGMEXEC
22971+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22972+ return SEGMEXEC_TASK_UNMAPPED_BASE;
22973+ else
22974+#endif
22975+
22976 return TASK_UNMAPPED_BASE;
22977- else
22978+ } else
22979 return TASK_UNMAPPED_BASE + mmap_rnd();
22980 }
22981
fe2de317 22982@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
58c5fc13
MT
22983 void arch_pick_mmap_layout(struct mm_struct *mm)
22984 {
22985 if (mmap_is_legacy()) {
22986- mm->mmap_base = mmap_legacy_base();
22987+ mm->mmap_base = mmap_legacy_base(mm);
22988+
22989+#ifdef CONFIG_PAX_RANDMMAP
22990+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22991+ mm->mmap_base += mm->delta_mmap;
22992+#endif
22993+
22994 mm->get_unmapped_area = arch_get_unmapped_area;
22995 mm->unmap_area = arch_unmap_area;
22996 } else {
22997- mm->mmap_base = mmap_base();
22998+ mm->mmap_base = mmap_base(mm);
22999+
23000+#ifdef CONFIG_PAX_RANDMMAP
23001+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23002+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23003+#endif
23004+
23005 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23006 mm->unmap_area = arch_unmap_area_topdown;
23007 }
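
The mmap.c hunk parameterizes the gap and base calculation on a pax_task_size that shrinks under SEGMEXEC, and lets RANDMMAP shift the chosen top-down base further by delta_mmap and delta_stack. Reduced to a standalone function with illustrative constants (the real values come from the mm and the architecture), the arithmetic is roughly:

/* mmap_base_demo.c - sketch of the patched top-down mmap base
 * calculation; constants and deltas are examples, not kernel values. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE           0xC0000000UL       /* e.g. 32-bit 3G/1G split */
#define SEGMEXEC_TASK_SIZE  (TASK_SIZE / 2)    /* address space halved */
#define MIN_GAP  (128UL * 1024 * 1024)

static unsigned long mmap_base(unsigned long task_size, unsigned long stack_rlimit,
                               unsigned long rnd, unsigned long delta_mmap,
                               unsigned long delta_stack)
{
    unsigned long max_gap = task_size / 6 * 5;  /* MAX_GAP now uses pax_task_size */
    unsigned long gap = stack_rlimit;

    if (gap < MIN_GAP)
        gap = MIN_GAP;
    else if (gap > max_gap)
        gap = max_gap;

    /* RANDMMAP: pull the base further down by the mmap and stack deltas */
    return PAGE_ALIGN(task_size - gap - rnd) - delta_mmap - delta_stack;
}

int main(void)
{
    printf("mmap_base = %#lx\n",
           mmap_base(SEGMEXEC_TASK_SIZE, 8UL * 1024 * 1024,
                     0x200000UL, 0x400000UL, 0x100000UL));
    return 0;
}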
fe2de317
MT
23008diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23009index 67421f3..8d6b107 100644
23010--- a/arch/x86/mm/mmio-mod.c
23011+++ b/arch/x86/mm/mmio-mod.c
23012@@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
15a11c5b
MT
23013 break;
23014 default:
23015 {
23016- unsigned char *ip = (unsigned char *)instptr;
23017+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23018 my_trace->opcode = MMIO_UNKNOWN_OP;
23019 my_trace->width = 0;
23020 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
fe2de317 23021@@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
8308f9c9
MT
23022 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23023 void __iomem *addr)
23024 {
23025- static atomic_t next_id;
23026+ static atomic_unchecked_t next_id;
23027 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23028 /* These are page-unaligned. */
23029 struct mmiotrace_map map = {
fe2de317 23030@@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
8308f9c9
MT
23031 .private = trace
23032 },
23033 .phys = offset,
23034- .id = atomic_inc_return(&next_id)
23035+ .id = atomic_inc_return_unchecked(&next_id)
23036 };
23037 map.map_id = trace->id;
23038
fe2de317
MT
23039diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23040index b008656..773eac2 100644
23041--- a/arch/x86/mm/pageattr-test.c
23042+++ b/arch/x86/mm/pageattr-test.c
23043@@ -36,7 +36,7 @@ enum {
23044
23045 static int pte_testbit(pte_t pte)
23046 {
23047- return pte_flags(pte) & _PAGE_UNUSED1;
23048+ return pte_flags(pte) & _PAGE_CPA_TEST;
23049 }
23050
23051 struct split_state {
23052diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23053index f9e5267..6f6e27f 100644
23054--- a/arch/x86/mm/pageattr.c
23055+++ b/arch/x86/mm/pageattr.c
23056@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
df50ba0c 23057 */
16454cff
MT
23058 #ifdef CONFIG_PCI_BIOS
23059 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
df50ba0c
MT
23060- pgprot_val(forbidden) |= _PAGE_NX;
23061+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
16454cff 23062 #endif
df50ba0c
MT
23063
23064 /*
fe2de317 23065@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
58c5fc13
MT
23066 * Does not cover __inittext since that is gone later on. On
23067 * 64bit we do not enforce !NX on the low mapping
23068 */
23069- if (within(address, (unsigned long)_text, (unsigned long)_etext))
df50ba0c 23070- pgprot_val(forbidden) |= _PAGE_NX;
58c5fc13 23071+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
df50ba0c 23072+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
58c5fc13
MT
23073
23074+#ifdef CONFIG_DEBUG_RODATA
23075 /*
23076 * The .rodata section needs to be read-only. Using the pfn
23077 * catches all aliases.
fe2de317 23078@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
58c5fc13
MT
23079 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23080 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23081 pgprot_val(forbidden) |= _PAGE_RW;
23082+#endif
23083
ae4e228f
MT
23084 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23085 /*
fe2de317 23086@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
df50ba0c
MT
23087 }
23088 #endif
23089
23090+#ifdef CONFIG_PAX_KERNEXEC
23091+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23092+ pgprot_val(forbidden) |= _PAGE_RW;
23093+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23094+ }
23095+#endif
23096+
23097 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23098
23099 return prot;
16454cff 23100@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
58c5fc13
MT
23101 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23102 {
58c5fc13 23103 /* change init_mm */
ae4e228f 23104+ pax_open_kernel();
58c5fc13 23105 set_pte_atomic(kpte, pte);
58c5fc13
MT
23106+
23107 #ifdef CONFIG_X86_32
23108 if (!SHARED_KERNEL_PMD) {
df50ba0c
MT
23109+
23110+#ifdef CONFIG_PAX_PER_CPU_PGD
23111+ unsigned long cpu;
23112+#else
58c5fc13 23113 struct page *page;
df50ba0c
MT
23114+#endif
23115
23116+#ifdef CONFIG_PAX_PER_CPU_PGD
23117+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23118+ pgd_t *pgd = get_cpu_pgd(cpu);
23119+#else
23120 list_for_each_entry(page, &pgd_list, lru) {
23121- pgd_t *pgd;
23122+ pgd_t *pgd = (pgd_t *)page_address(page);
23123+#endif
23124+
23125 pud_t *pud;
23126 pmd_t *pmd;
23127
23128- pgd = (pgd_t *)page_address(page) + pgd_index(address);
23129+ pgd += pgd_index(address);
23130 pud = pud_offset(pgd, address);
23131 pmd = pmd_offset(pud, address);
23132 set_pte_atomic((pte_t *)pmd, pte);
23133 }
23134 }
23135 #endif
23136+ pax_close_kernel();
23137 }
23138
23139 static int
fe2de317
MT
23140diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23141index f6ff57b..481690f 100644
23142--- a/arch/x86/mm/pat.c
23143+++ b/arch/x86/mm/pat.c
57199397
MT
23144@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23145
23146 if (!entry) {
58c5fc13
MT
23147 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23148- current->comm, current->pid, start, end);
23149+ current->comm, task_pid_nr(current), start, end);
57199397 23150 return -EINVAL;
58c5fc13
MT
23151 }
23152
fe2de317 23153@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
57199397
MT
23154 while (cursor < to) {
23155 if (!devmem_is_allowed(pfn)) {
23156 printk(KERN_INFO
23157- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23158- current->comm, from, to);
23159+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23160+ current->comm, from, to, cursor);
23161 return 0;
23162 }
23163 cursor += PAGE_SIZE;
fe2de317 23164@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
58c5fc13
MT
23165 printk(KERN_INFO
23166 "%s:%d ioremap_change_attr failed %s "
23167 "for %Lx-%Lx\n",
23168- current->comm, current->pid,
23169+ current->comm, task_pid_nr(current),
23170 cattr_name(flags),
23171 base, (unsigned long long)(base + size));
23172 return -EINVAL;
fe2de317 23173@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
57199397
MT
23174 if (want_flags != flags) {
23175 printk(KERN_WARNING
23176 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23177- current->comm, current->pid,
23178+ current->comm, task_pid_nr(current),
23179 cattr_name(want_flags),
23180 (unsigned long long)paddr,
23181 (unsigned long long)(paddr + size),
fe2de317 23182@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
58c5fc13
MT
23183 free_memtype(paddr, paddr + size);
23184 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23185 " for %Lx-%Lx, got %s\n",
23186- current->comm, current->pid,
23187+ current->comm, task_pid_nr(current),
23188 cattr_name(want_flags),
23189 (unsigned long long)paddr,
23190 (unsigned long long)(paddr + size),
fe2de317
MT
23191diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23192index 9f0614d..92ae64a 100644
23193--- a/arch/x86/mm/pf_in.c
23194+++ b/arch/x86/mm/pf_in.c
23195@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
15a11c5b
MT
23196 int i;
23197 enum reason_type rv = OTHERS;
23198
23199- p = (unsigned char *)ins_addr;
23200+ p = (unsigned char *)ktla_ktva(ins_addr);
23201 p += skip_prefix(p, &prf);
23202 p += get_opcode(p, &opcode);
23203
fe2de317 23204@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
15a11c5b
MT
23205 struct prefix_bits prf;
23206 int i;
23207
23208- p = (unsigned char *)ins_addr;
23209+ p = (unsigned char *)ktla_ktva(ins_addr);
23210 p += skip_prefix(p, &prf);
23211 p += get_opcode(p, &opcode);
23212
fe2de317 23213@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
15a11c5b
MT
23214 struct prefix_bits prf;
23215 int i;
23216
23217- p = (unsigned char *)ins_addr;
23218+ p = (unsigned char *)ktla_ktva(ins_addr);
23219 p += skip_prefix(p, &prf);
23220 p += get_opcode(p, &opcode);
23221
fe2de317 23222@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
15a11c5b
MT
23223 struct prefix_bits prf;
23224 int i;
23225
23226- p = (unsigned char *)ins_addr;
23227+ p = (unsigned char *)ktla_ktva(ins_addr);
23228 p += skip_prefix(p, &prf);
23229 p += get_opcode(p, &opcode);
23230 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
fe2de317 23231@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
15a11c5b
MT
23232 struct prefix_bits prf;
23233 int i;
23234
23235- p = (unsigned char *)ins_addr;
23236+ p = (unsigned char *)ktla_ktva(ins_addr);
23237 p += skip_prefix(p, &prf);
23238 p += get_opcode(p, &opcode);
23239 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
fe2de317
MT
23240diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23241index 8573b83..6372501 100644
23242--- a/arch/x86/mm/pgtable.c
23243+++ b/arch/x86/mm/pgtable.c
23244@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
df50ba0c
MT
23245 list_del(&page->lru);
23246 }
23247
23248-#define UNSHARED_PTRS_PER_PGD \
23249- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23250+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23251+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
bc901d79 23252
df50ba0c
MT
23253+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23254+{
23255+ while (count--)
bc901d79 23256+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
df50ba0c
MT
23257+}
23258+#endif
fe2de317 23259
df50ba0c
MT
23260+#ifdef CONFIG_PAX_PER_CPU_PGD
23261+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23262+{
23263+ while (count--)
23264+
23265+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23266+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23267+#else
23268+ *dst++ = *src++;
23269+#endif
fe2de317 23270+
df50ba0c
MT
23271+}
23272+#endif
23273+
df50ba0c
MT
23274+#ifdef CONFIG_X86_64
23275+#define pxd_t pud_t
23276+#define pyd_t pgd_t
23277+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23278+#define pxd_free(mm, pud) pud_free((mm), (pud))
23279+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23280+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23281+#define PYD_SIZE PGDIR_SIZE
23282+#else
23283+#define pxd_t pmd_t
23284+#define pyd_t pud_t
23285+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23286+#define pxd_free(mm, pud) pmd_free((mm), (pud))
23287+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23288+#define pyd_offset(mm ,address) pud_offset((mm), (address))
23289+#define PYD_SIZE PUD_SIZE
23290+#endif
66a7e928
MT
23291+
23292+#ifdef CONFIG_PAX_PER_CPU_PGD
23293+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23294+static inline void pgd_dtor(pgd_t *pgd) {}
df50ba0c 23295+#else
bc901d79 23296 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
df50ba0c 23297 {
66a7e928
MT
23298 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23299@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
df50ba0c 23300 pgd_list_del(pgd);
16454cff 23301 spin_unlock(&pgd_lock);
df50ba0c
MT
23302 }
23303+#endif
23304
23305 /*
23306 * List of all pgd's needed for non-PAE so it can invalidate entries
66a7e928 23307@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
df50ba0c
MT
23308 * -- wli
23309 */
23310
23311-#ifdef CONFIG_X86_PAE
23312+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23313 /*
23314 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23315 * updating the top-level pagetable entries to guarantee the
66a7e928 23316@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
df50ba0c
MT
23317 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23318 * and initialize the kernel pmds here.
23319 */
23320-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23321+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23322
23323 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23324 {
fe2de317 23325@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
16454cff
MT
23326 */
23327 flush_tlb_mm(mm);
df50ba0c
MT
23328 }
23329+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23330+#define PREALLOCATED_PXDS USER_PGD_PTRS
23331 #else /* !CONFIG_X86_PAE */
23332
23333 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23334-#define PREALLOCATED_PMDS 0
23335+#define PREALLOCATED_PXDS 0
23336
23337 #endif /* CONFIG_X86_PAE */
23338
23339-static void free_pmds(pmd_t *pmds[])
23340+static void free_pxds(pxd_t *pxds[])
23341 {
23342 int i;
23343
23344- for(i = 0; i < PREALLOCATED_PMDS; i++)
23345- if (pmds[i])
23346- free_page((unsigned long)pmds[i]);
23347+ for(i = 0; i < PREALLOCATED_PXDS; i++)
23348+ if (pxds[i])
23349+ free_page((unsigned long)pxds[i]);
23350 }
23351
23352-static int preallocate_pmds(pmd_t *pmds[])
23353+static int preallocate_pxds(pxd_t *pxds[])
23354 {
23355 int i;
23356 bool failed = false;
23357
23358- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23359- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23360- if (pmd == NULL)
23361+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23362+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23363+ if (pxd == NULL)
23364 failed = true;
23365- pmds[i] = pmd;
23366+ pxds[i] = pxd;
23367 }
23368
23369 if (failed) {
23370- free_pmds(pmds);
23371+ free_pxds(pxds);
23372 return -ENOMEM;
23373 }
23374
fe2de317 23375@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
df50ba0c
MT
23376 * preallocate which never got a corresponding vma will need to be
23377 * freed manually.
23378 */
23379-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23380+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23381 {
23382 int i;
23383
23384- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23385+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23386 pgd_t pgd = pgdp[i];
23387
23388 if (pgd_val(pgd) != 0) {
23389- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23390+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23391
23392- pgdp[i] = native_make_pgd(0);
23393+ set_pgd(pgdp + i, native_make_pgd(0));
23394
23395- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23396- pmd_free(mm, pmd);
23397+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23398+ pxd_free(mm, pxd);
23399 }
23400 }
23401 }
23402
23403-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23404+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23405 {
23406- pud_t *pud;
23407+ pyd_t *pyd;
23408 unsigned long addr;
23409 int i;
23410
23411- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23412+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23413 return;
23414
23415- pud = pud_offset(pgd, 0);
23416+#ifdef CONFIG_X86_64
23417+ pyd = pyd_offset(mm, 0L);
23418+#else
23419+ pyd = pyd_offset(pgd, 0L);
23420+#endif
23421
23422- for (addr = i = 0; i < PREALLOCATED_PMDS;
23423- i++, pud++, addr += PUD_SIZE) {
23424- pmd_t *pmd = pmds[i];
23425+ for (addr = i = 0; i < PREALLOCATED_PXDS;
23426+ i++, pyd++, addr += PYD_SIZE) {
23427+ pxd_t *pxd = pxds[i];
23428
23429 if (i >= KERNEL_PGD_BOUNDARY)
23430- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23431- sizeof(pmd_t) * PTRS_PER_PMD);
23432+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23433+ sizeof(pxd_t) * PTRS_PER_PMD);
23434
23435- pud_populate(mm, pud, pmd);
23436+ pyd_populate(mm, pyd, pxd);
23437 }
23438 }
23439
23440 pgd_t *pgd_alloc(struct mm_struct *mm)
23441 {
23442 pgd_t *pgd;
23443- pmd_t *pmds[PREALLOCATED_PMDS];
23444+ pxd_t *pxds[PREALLOCATED_PXDS];
df50ba0c
MT
23445
23446 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
16454cff 23447
66a7e928 23448@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
df50ba0c
MT
23449
23450 mm->pgd = pgd;
23451
23452- if (preallocate_pmds(pmds) != 0)
23453+ if (preallocate_pxds(pxds) != 0)
23454 goto out_free_pgd;
23455
23456 if (paravirt_pgd_alloc(mm) != 0)
23457- goto out_free_pmds;
23458+ goto out_free_pxds;
23459
23460 /*
23461 * Make sure that pre-populating the pmds is atomic with
66a7e928 23462@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
16454cff 23463 spin_lock(&pgd_lock);
df50ba0c 23464
bc901d79 23465 pgd_ctor(mm, pgd);
df50ba0c
MT
23466- pgd_prepopulate_pmd(mm, pgd, pmds);
23467+ pgd_prepopulate_pxd(mm, pgd, pxds);
23468
16454cff 23469 spin_unlock(&pgd_lock);
df50ba0c
MT
23470
23471 return pgd;
23472
23473-out_free_pmds:
23474- free_pmds(pmds);
23475+out_free_pxds:
23476+ free_pxds(pxds);
23477 out_free_pgd:
23478 free_page((unsigned long)pgd);
23479 out:
66a7e928 23480@@ -295,7 +344,7 @@ out:
df50ba0c
MT
23481
23482 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23483 {
23484- pgd_mop_up_pmds(mm, pgd);
23485+ pgd_mop_up_pxds(mm, pgd);
23486 pgd_dtor(pgd);
23487 paravirt_pgd_free(mm, pgd);
23488 free_page((unsigned long)pgd);
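
The pgtable.c changes introduce __clone_user_pgds() and __shadow_user_pgds(): the first copies user-space PGD slots while masking _PAGE_PRESENT (clone_pgd_mask), the second copies them with _PAGE_NX set and _PAGE_USER cleared, which is how the per-CPU directories treat user mappings under UDEREF on amd64. The standalone sketch below reproduces just that bit manipulation on plain 64-bit values; the flag constants match x86, everything else is illustrative:

/* uderef_pgd_demo.c - the bit manipulation of the two helpers above,
 * applied to plain integers instead of real PGD entries. */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_PRESENT 0x001ULL
#define _PAGE_USER    0x004ULL
#define _PAGE_NX      (1ULL << 63)

static const uint64_t clone_pgd_mask = ~_PAGE_PRESENT;

static void clone_user_pgds(uint64_t *dst, const uint64_t *src, int count)
{
    while (count--)
        *dst++ = *src++ & clone_pgd_mask;           /* user half left non-present */
}

static void shadow_user_pgds(uint64_t *dst, const uint64_t *src, int count)
{
    while (count--)
        *dst++ = (*src++ | _PAGE_NX) & ~_PAGE_USER; /* present, kernel-only, no-exec */
}

int main(void)
{
    uint64_t user[2] = { 0x1000 | _PAGE_PRESENT | _PAGE_USER, 0 };
    uint64_t cloned[2], shadow[2];

    clone_user_pgds(cloned, user, 2);
    shadow_user_pgds(shadow, user, 2);
    printf("orig %#llx -> clone %#llx, shadow %#llx\n",
           (unsigned long long)user[0], (unsigned long long)cloned[0],
           (unsigned long long)shadow[0]);
    return 0;
}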
fe2de317
MT
23489diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23490index cac7184..09a39fa 100644
23491--- a/arch/x86/mm/pgtable_32.c
23492+++ b/arch/x86/mm/pgtable_32.c
23493@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23494 return;
23495 }
23496 pte = pte_offset_kernel(pmd, vaddr);
23497+
23498+ pax_open_kernel();
23499 if (pte_val(pteval))
23500 set_pte_at(&init_mm, vaddr, pte, pteval);
23501 else
23502 pte_clear(&init_mm, vaddr, pte);
23503+ pax_close_kernel();
23504
23505 /*
23506 * It's enough to flush this one mapping.
23507diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23508index 410531d..0f16030 100644
23509--- a/arch/x86/mm/setup_nx.c
23510+++ b/arch/x86/mm/setup_nx.c
efbe55a5 23511@@ -5,8 +5,10 @@
df50ba0c
MT
23512 #include <asm/pgtable.h>
23513 #include <asm/proto.h>
23514
23515+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23516 static int disable_nx __cpuinitdata;
df50ba0c 23517
efbe55a5 23518+#ifndef CONFIG_PAX_PAGEEXEC
df50ba0c
MT
23519 /*
23520 * noexec = on|off
23521 *
fe2de317 23522@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
df50ba0c
MT
23523 return 0;
23524 }
23525 early_param("noexec", noexec_setup);
efbe55a5
MT
23526+#endif
23527+
df50ba0c
MT
23528+#endif
23529
23530 void __cpuinit x86_configure_nx(void)
23531 {
23532+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23533 if (cpu_has_nx && !disable_nx)
23534 __supported_pte_mask |= _PAGE_NX;
23535 else
23536+#endif
23537 __supported_pte_mask &= ~_PAGE_NX;
23538 }
23539
fe2de317
MT
23540diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23541index d6c0418..06a0ad5 100644
23542--- a/arch/x86/mm/tlb.c
23543+++ b/arch/x86/mm/tlb.c
bc901d79 23544@@ -65,7 +65,11 @@ void leave_mm(int cpu)
df50ba0c
MT
23545 BUG();
23546 cpumask_clear_cpu(cpu,
23547 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23548+
23549+#ifndef CONFIG_PAX_PER_CPU_PGD
23550 load_cr3(swapper_pg_dir);
23551+#endif
23552+
23553 }
23554 EXPORT_SYMBOL_GPL(leave_mm);
23555
fe2de317
MT
23556diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23557index 6687022..ceabcfa 100644
23558--- a/arch/x86/net/bpf_jit.S
23559+++ b/arch/x86/net/bpf_jit.S
6e9df6a3
MT
23560@@ -9,6 +9,7 @@
23561 */
23562 #include <linux/linkage.h>
23563 #include <asm/dwarf2.h>
23564+#include <asm/alternative-asm.h>
23565
23566 /*
23567 * Calling convention :
23568@@ -35,6 +36,7 @@ sk_load_word:
23569 jle bpf_slow_path_word
23570 mov (SKBDATA,%rsi),%eax
23571 bswap %eax /* ntohl() */
23572+ pax_force_retaddr
23573 ret
23574
23575
23576@@ -53,6 +55,7 @@ sk_load_half:
23577 jle bpf_slow_path_half
23578 movzwl (SKBDATA,%rsi),%eax
23579 rol $8,%ax # ntohs()
23580+ pax_force_retaddr
23581 ret
23582
23583 sk_load_byte_ind:
23584@@ -66,6 +69,7 @@ sk_load_byte:
23585 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23586 jle bpf_slow_path_byte
23587 movzbl (SKBDATA,%rsi),%eax
23588+ pax_force_retaddr
23589 ret
23590
23591 /**
23592@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23593 movzbl (SKBDATA,%rsi),%ebx
23594 and $15,%bl
23595 shl $2,%bl
23596+ pax_force_retaddr
23597 ret
23598 CFI_ENDPROC
23599 ENDPROC(sk_load_byte_msh)
23600@@ -91,6 +96,7 @@ bpf_error:
23601 xor %eax,%eax
23602 mov -8(%rbp),%rbx
23603 leaveq
23604+ pax_force_retaddr
23605 ret
23606
23607 /* rsi contains offset and can be scratched */
23608@@ -113,6 +119,7 @@ bpf_slow_path_word:
23609 js bpf_error
23610 mov -12(%rbp),%eax
23611 bswap %eax
23612+ pax_force_retaddr
23613 ret
23614
23615 bpf_slow_path_half:
23616@@ -121,12 +128,14 @@ bpf_slow_path_half:
23617 mov -12(%rbp),%ax
23618 rol $8,%ax
23619 movzwl %ax,%eax
23620+ pax_force_retaddr
23621 ret
23622
23623 bpf_slow_path_byte:
23624 bpf_slow_path_common(1)
23625 js bpf_error
23626 movzbl -12(%rbp),%eax
23627+ pax_force_retaddr
23628 ret
23629
23630 bpf_slow_path_byte_msh:
23631@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23632 and $15,%al
23633 shl $2,%al
23634 xchg %eax,%ebx
23635+ pax_force_retaddr
23636 ret
fe2de317
MT
23637diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23638index bfab3fa..05aac3a 100644
23639--- a/arch/x86/net/bpf_jit_comp.c
23640+++ b/arch/x86/net/bpf_jit_comp.c
23641@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23642 set_fs(old_fs);
23643 }
23644
23645+struct bpf_jit_work {
23646+ struct work_struct work;
23647+ void *image;
23648+};
23649
23650 void bpf_jit_compile(struct sk_filter *fp)
23651 {
23652@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23653 if (addrs == NULL)
23654 return;
23655
23656+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23657+ if (!fp->work)
23658+ goto out;
23659+
23660 /* Before first pass, make a rough estimation of addrs[]
23661 * each bpf instruction is translated to less than 64 bytes
23662 */
23663@@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23664 if (image) {
23665 if (unlikely(proglen + ilen > oldproglen)) {
23666 pr_err("bpb_jit_compile fatal error\n");
23667- kfree(addrs);
23668- module_free(NULL, image);
23669- return;
23670+ module_free_exec(NULL, image);
23671+ goto out;
23672 }
23673+ pax_open_kernel();
23674 memcpy(image + proglen, temp, ilen);
23675+ pax_close_kernel();
23676 }
23677 proglen += ilen;
23678 addrs[i] = proglen;
23679@@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23680 break;
23681 }
23682 if (proglen == oldproglen) {
23683- image = module_alloc(max_t(unsigned int,
23684+ image = module_alloc_exec(max_t(unsigned int,
23685 proglen,
23686 sizeof(struct work_struct)));
23687 if (!image)
23688@@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23689 fp->bpf_func = (void *)image;
23690 }
23691 out:
23692+ kfree(fp->work);
23693 kfree(addrs);
23694 return;
23695 }
23696
23697 static void jit_free_defer(struct work_struct *arg)
23698 {
23699- module_free(NULL, arg);
23700+ module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23701+ kfree(arg);
23702 }
23703
23704 /* run from softirq, we must use a work_struct to call
23705- * module_free() from process context
23706+ * module_free_exec() from process context
23707 */
23708 void bpf_jit_free(struct sk_filter *fp)
23709 {
23710 if (fp->bpf_func != sk_run_filter) {
23711- struct work_struct *work = (struct work_struct *)fp->bpf_func;
23712+ struct work_struct *work = &fp->work->work;
23713
23714 INIT_WORK(work, jit_free_defer);
23715+ fp->work->image = fp->bpf_func;
23716 schedule_work(work);
23717 }
23718 }
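
bpf_jit_free() runs from softirq context, so the patch adds a struct bpf_jit_work that couples the work item with the image pointer and frees both from the deferred callback; the patch casts the work pointer back directly because work is the first member, container_of() being the general form of the same idiom. A user-space rendition of that ownership pattern, with stand-ins for schedule_work() and module_free_exec(), might look like:

/* deferred_free_demo.c - the shape of the deferred-free change:
 * a small struct bundles a work item with the pointer to free,
 * and the callback recovers the container to do the actual free. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct work_struct {
    void (*func)(struct work_struct *);
};

struct bpf_jit_work {                 /* mirrors the struct added by the patch */
    struct work_struct work;
    void *image;
};

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void jit_free_defer(struct work_struct *w)
{
    struct bpf_jit_work *jw = container_of(w, struct bpf_jit_work, work);
    printf("freeing image %p from 'process context'\n", jw->image);
    free(jw->image);
    free(jw);
}

int main(void)
{
    struct bpf_jit_work *jw = malloc(sizeof(*jw));
    jw->image = malloc(64);            /* stands in for the JITed code */
    jw->work.func = jit_free_defer;

    /* stand-in for schedule_work(&jw->work): run the callback later */
    jw->work.func(&jw->work);
    return 0;
}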
23719diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23720index bff89df..377758a 100644
23721--- a/arch/x86/oprofile/backtrace.c
23722+++ b/arch/x86/oprofile/backtrace.c
23723@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
6e9df6a3
MT
23724 struct stack_frame_ia32 *fp;
23725 unsigned long bytes;
23726
23727- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23728+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23729 if (bytes != sizeof(bufhead))
23730 return NULL;
23731
23732- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23733+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23734
23735 oprofile_add_trace(bufhead[0].return_address);
23736
fe2de317 23737@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
6e9df6a3
MT
23738 struct stack_frame bufhead[2];
23739 unsigned long bytes;
23740
23741- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23742+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23743 if (bytes != sizeof(bufhead))
23744 return NULL;
23745
fe2de317 23746@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
58c5fc13 23747 {
bc901d79 23748 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
58c5fc13
MT
23749
23750- if (!user_mode_vm(regs)) {
23751+ if (!user_mode(regs)) {
23752 unsigned long stack = kernel_stack_pointer(regs);
23753 if (depth)
66a7e928 23754 dump_trace(NULL, regs, (unsigned long *)stack, 0,
fe2de317
MT
23755diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23756index cb29191..036766d 100644
23757--- a/arch/x86/pci/mrst.c
23758+++ b/arch/x86/pci/mrst.c
15a11c5b
MT
23759@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23760 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23761 pci_mmcfg_late_init();
23762 pcibios_enable_irq = mrst_pci_irq_enable;
23763- pci_root_ops = pci_mrst_ops;
23764+ pax_open_kernel();
23765+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23766+ pax_close_kernel();
23767 /* Continue with standard init */
23768 return 1;
ae4e228f 23769 }
fe2de317
MT
23770diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23771index f685535..2b76a81 100644
23772--- a/arch/x86/pci/pcbios.c
23773+++ b/arch/x86/pci/pcbios.c
16454cff 23774@@ -79,50 +79,93 @@ union bios32 {
58c5fc13
MT
23775 static struct {
23776 unsigned long address;
23777 unsigned short segment;
23778-} bios32_indirect = { 0, __KERNEL_CS };
23779+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23780
23781 /*
23782 * Returns the entry point for the given service, NULL on error
23783 */
23784
23785-static unsigned long bios32_service(unsigned long service)
23786+static unsigned long __devinit bios32_service(unsigned long service)
23787 {
23788 unsigned char return_code; /* %al */
23789 unsigned long address; /* %ebx */
23790 unsigned long length; /* %ecx */
23791 unsigned long entry; /* %edx */
23792 unsigned long flags;
23793+ struct desc_struct d, *gdt;
58c5fc13
MT
23794
23795 local_irq_save(flags);
23796- __asm__("lcall *(%%edi); cld"
23797+
23798+ gdt = get_cpu_gdt_table(smp_processor_id());
23799+
58c5fc13
MT
23800+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23801+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23802+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23803+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23804+
58c5fc13
MT
23805+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23806 : "=a" (return_code),
23807 "=b" (address),
23808 "=c" (length),
23809 "=d" (entry)
23810 : "0" (service),
23811 "1" (0),
23812- "D" (&bios32_indirect));
23813+ "D" (&bios32_indirect),
23814+ "r"(__PCIBIOS_DS)
23815+ : "memory");
23816+
ae4e228f 23817+ pax_open_kernel();
58c5fc13
MT
23818+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23819+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23820+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23821+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
ae4e228f 23822+ pax_close_kernel();
58c5fc13
MT
23823+
23824 local_irq_restore(flags);
23825
23826 switch (return_code) {
23827- case 0:
23828- return address + entry;
23829- case 0x80: /* Not present */
23830- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23831- return 0;
23832- default: /* Shouldn't happen */
23833- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23834- service, return_code);
23835+ case 0: {
23836+ int cpu;
23837+ unsigned char flags;
23838+
23839+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23840+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23841+ printk(KERN_WARNING "bios32_service: not valid\n");
23842 return 0;
23843+ }
23844+ address = address + PAGE_OFFSET;
23845+ length += 16UL; /* some BIOSs underreport this... */
23846+ flags = 4;
23847+ if (length >= 64*1024*1024) {
23848+ length >>= PAGE_SHIFT;
23849+ flags |= 8;
23850+ }
23851+
58c5fc13
MT
23852+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23853+ gdt = get_cpu_gdt_table(cpu);
23854+ pack_descriptor(&d, address, length, 0x9b, flags);
23855+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23856+ pack_descriptor(&d, address, length, 0x93, flags);
23857+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23858+ }
58c5fc13
MT
23859+ return entry;
23860+ }
23861+ case 0x80: /* Not present */
23862+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23863+ return 0;
23864+ default: /* Shouldn't happen */
23865+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23866+ service, return_code);
23867+ return 0;
23868 }
23869 }
23870
23871 static struct {
23872 unsigned long address;
23873 unsigned short segment;
23874-} pci_indirect = { 0, __KERNEL_CS };
23875+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23876
23877-static int pci_bios_present;
23878+static int pci_bios_present __read_only;
23879
23880 static int __devinit check_pcibios(void)
23881 {
16454cff 23882@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
58c5fc13
MT
23883 unsigned long flags, pcibios_entry;
23884
23885 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23886- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23887+ pci_indirect.address = pcibios_entry;
23888
23889 local_irq_save(flags);
23890- __asm__(
23891- "lcall *(%%edi); cld\n\t"
23892+ __asm__("movw %w6, %%ds\n\t"
23893+ "lcall *%%ss:(%%edi); cld\n\t"
23894+ "push %%ss\n\t"
23895+ "pop %%ds\n\t"
23896 "jc 1f\n\t"
23897 "xor %%ah, %%ah\n"
23898 "1:"
16454cff 23899@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
58c5fc13
MT
23900 "=b" (ebx),
23901 "=c" (ecx)
23902 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23903- "D" (&pci_indirect)
23904+ "D" (&pci_indirect),
23905+ "r" (__PCIBIOS_DS)
23906 : "memory");
23907 local_irq_restore(flags);
23908
fe2de317 23909@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
23910
23911 switch (len) {
23912 case 1:
23913- __asm__("lcall *(%%esi); cld\n\t"
23914+ __asm__("movw %w6, %%ds\n\t"
23915+ "lcall *%%ss:(%%esi); cld\n\t"
23916+ "push %%ss\n\t"
23917+ "pop %%ds\n\t"
23918 "jc 1f\n\t"
23919 "xor %%ah, %%ah\n"
23920 "1:"
fe2de317 23921@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
23922 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23923 "b" (bx),
23924 "D" ((long)reg),
23925- "S" (&pci_indirect));
23926+ "S" (&pci_indirect),
23927+ "r" (__PCIBIOS_DS));
23928 /*
23929 * Zero-extend the result beyond 8 bits, do not trust the
23930 * BIOS having done it:
fe2de317 23931@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
23932 *value &= 0xff;
23933 break;
23934 case 2:
23935- __asm__("lcall *(%%esi); cld\n\t"
23936+ __asm__("movw %w6, %%ds\n\t"
23937+ "lcall *%%ss:(%%esi); cld\n\t"
23938+ "push %%ss\n\t"
23939+ "pop %%ds\n\t"
23940 "jc 1f\n\t"
23941 "xor %%ah, %%ah\n"
23942 "1:"
fe2de317 23943@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
23944 : "1" (PCIBIOS_READ_CONFIG_WORD),
23945 "b" (bx),
23946 "D" ((long)reg),
23947- "S" (&pci_indirect));
23948+ "S" (&pci_indirect),
23949+ "r" (__PCIBIOS_DS));
23950 /*
23951 * Zero-extend the result beyond 16 bits, do not trust the
23952 * BIOS having done it:
fe2de317 23953@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
23954 *value &= 0xffff;
23955 break;
23956 case 4:
23957- __asm__("lcall *(%%esi); cld\n\t"
23958+ __asm__("movw %w6, %%ds\n\t"
23959+ "lcall *%%ss:(%%esi); cld\n\t"
23960+ "push %%ss\n\t"
23961+ "pop %%ds\n\t"
23962 "jc 1f\n\t"
23963 "xor %%ah, %%ah\n"
23964 "1:"
fe2de317 23965@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
23966 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23967 "b" (bx),
23968 "D" ((long)reg),
23969- "S" (&pci_indirect));
23970+ "S" (&pci_indirect),
23971+ "r" (__PCIBIOS_DS));
23972 break;
23973 }
23974
fe2de317 23975@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
58c5fc13
MT
23976
23977 switch (len) {
23978 case 1:
23979- __asm__("lcall *(%%esi); cld\n\t"
23980+ __asm__("movw %w6, %%ds\n\t"
23981+ "lcall *%%ss:(%%esi); cld\n\t"
23982+ "push %%ss\n\t"
23983+ "pop %%ds\n\t"
23984 "jc 1f\n\t"
23985 "xor %%ah, %%ah\n"
23986 "1:"
fe2de317 23987@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
58c5fc13
MT
23988 "c" (value),
23989 "b" (bx),
23990 "D" ((long)reg),
23991- "S" (&pci_indirect));
23992+ "S" (&pci_indirect),
23993+ "r" (__PCIBIOS_DS));
23994 break;
23995 case 2:
23996- __asm__("lcall *(%%esi); cld\n\t"
23997+ __asm__("movw %w6, %%ds\n\t"
23998+ "lcall *%%ss:(%%esi); cld\n\t"
23999+ "push %%ss\n\t"
24000+ "pop %%ds\n\t"
24001 "jc 1f\n\t"
24002 "xor %%ah, %%ah\n"
24003 "1:"
fe2de317 24004@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
58c5fc13
MT
24005 "c" (value),
24006 "b" (bx),
24007 "D" ((long)reg),
24008- "S" (&pci_indirect));
24009+ "S" (&pci_indirect),
24010+ "r" (__PCIBIOS_DS));
24011 break;
24012 case 4:
24013- __asm__("lcall *(%%esi); cld\n\t"
24014+ __asm__("movw %w6, %%ds\n\t"
24015+ "lcall *%%ss:(%%esi); cld\n\t"
24016+ "push %%ss\n\t"
24017+ "pop %%ds\n\t"
24018 "jc 1f\n\t"
24019 "xor %%ah, %%ah\n"
24020 "1:"
fe2de317 24021@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
58c5fc13
MT
24022 "c" (value),
24023 "b" (bx),
24024 "D" ((long)reg),
24025- "S" (&pci_indirect));
24026+ "S" (&pci_indirect),
24027+ "r" (__PCIBIOS_DS));
24028 break;
24029 }
24030
fe2de317 24031@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
58c5fc13
MT
24032
24033 DBG("PCI: Fetching IRQ routing table... ");
24034 __asm__("push %%es\n\t"
24035+ "movw %w8, %%ds\n\t"
24036 "push %%ds\n\t"
24037 "pop %%es\n\t"
24038- "lcall *(%%esi); cld\n\t"
24039+ "lcall *%%ss:(%%esi); cld\n\t"
24040 "pop %%es\n\t"
24041+ "push %%ss\n\t"
24042+ "pop %%ds\n"
24043 "jc 1f\n\t"
24044 "xor %%ah, %%ah\n"
24045 "1:"
fe2de317 24046@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
58c5fc13
MT
24047 "1" (0),
24048 "D" ((long) &opt),
24049 "S" (&pci_indirect),
24050- "m" (opt)
24051+ "m" (opt),
24052+ "r" (__PCIBIOS_DS)
24053 : "memory");
24054 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24055 if (ret & 0xff00)
fe2de317 24056@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
58c5fc13
MT
24057 {
24058 int ret;
24059
24060- __asm__("lcall *(%%esi); cld\n\t"
24061+ __asm__("movw %w5, %%ds\n\t"
24062+ "lcall *%%ss:(%%esi); cld\n\t"
24063+ "push %%ss\n\t"
24064+ "pop %%ds\n"
24065 "jc 1f\n\t"
24066 "xor %%ah, %%ah\n"
24067 "1:"
fe2de317 24068@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
58c5fc13
MT
24069 : "0" (PCIBIOS_SET_PCI_HW_INT),
24070 "b" ((dev->bus->number << 8) | dev->devfn),
24071 "c" ((irq << 8) | (pin + 10)),
24072- "S" (&pci_indirect));
24073+ "S" (&pci_indirect),
24074+ "r" (__PCIBIOS_DS));
24075 return !(ret & 0xff00);
24076 }
24077 EXPORT_SYMBOL(pcibios_set_irq_routing);
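
The BIOS32/PCI BIOS calls above are switched from __KERNEL_CS to dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors that pack_descriptor() builds with access byte 0x9B or 0x93 and flags 0xC. For reference, the sketch below packs those fields into the 8-byte x86 descriptor format on its own; it writes nothing to a real GDT and the function name simply mirrors the kernel's:

/* gdt_pack_demo.c - how a 32-bit segment descriptor packs base, limit,
 * access byte and flags; standalone, does not touch any GDT. */
#include <stdio.h>
#include <stdint.h>

static uint64_t pack_descriptor(uint32_t base, uint32_t limit,
                                uint8_t access, uint8_t flags)
{
    uint64_t d = 0;

    d |= (uint64_t)(limit & 0xFFFF);                 /* limit 15..0  */
    d |= (uint64_t)(base  & 0xFFFF)        << 16;    /* base  15..0  */
    d |= (uint64_t)((base >> 16) & 0xFF)   << 32;    /* base  23..16 */
    d |= (uint64_t)access                  << 40;    /* type/DPL/P   */
    d |= (uint64_t)((limit >> 16) & 0xF)   << 48;    /* limit 19..16 */
    d |= (uint64_t)(flags & 0xF)           << 52;    /* AVL/L/DB/G   */
    d |= (uint64_t)((base >> 24) & 0xFF)   << 56;    /* base  31..24 */
    return d;
}

int main(void)
{
    /* ring-0 code segment, base 0, 20-bit limit 0xFFFFF, 32-bit, 4K granular */
    printf("PCIBIOS_CS descriptor: %#018llx\n",
           (unsigned long long)pack_descriptor(0, 0xFFFFF, 0x9B, 0xC));
    return 0;
}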
fe2de317
MT
24078diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24079index 5cab48e..b025f9b 100644
24080--- a/arch/x86/platform/efi/efi_32.c
24081+++ b/arch/x86/platform/efi/efi_32.c
15a11c5b 24082@@ -38,70 +38,56 @@
bc901d79
MT
24083 */
24084
24085 static unsigned long efi_rt_eflags;
24086-static pgd_t efi_bak_pg_dir_pointer[2];
24087+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
24088
24089-void efi_call_phys_prelog(void)
24090+void __init efi_call_phys_prelog(void)
24091 {
24092- unsigned long cr4;
24093- unsigned long temp;
24094 struct desc_ptr gdt_descr;
24095
15a11c5b
MT
24096+#ifdef CONFIG_PAX_KERNEXEC
24097+ struct desc_struct d;
24098+#endif
fe2de317
MT
24099+
24100 local_irq_save(efi_rt_eflags);
bc901d79
MT
24101
24102- /*
24103- * If I don't have PAE, I should just duplicate two entries in page
24104- * directory. If I have PAE, I just need to duplicate one entry in
24105- * page directory.
24106- */
24107- cr4 = read_cr4_safe();
fe2de317 24108-
bc901d79
MT
24109- if (cr4 & X86_CR4_PAE) {
24110- efi_bak_pg_dir_pointer[0].pgd =
24111- swapper_pg_dir[pgd_index(0)].pgd;
24112- swapper_pg_dir[0].pgd =
24113- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
24114- } else {
24115- efi_bak_pg_dir_pointer[0].pgd =
24116- swapper_pg_dir[pgd_index(0)].pgd;
24117- efi_bak_pg_dir_pointer[1].pgd =
24118- swapper_pg_dir[pgd_index(0x400000)].pgd;
24119- swapper_pg_dir[pgd_index(0)].pgd =
24120- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
24121- temp = PAGE_OFFSET + 0x400000;
24122- swapper_pg_dir[pgd_index(0x400000)].pgd =
24123- swapper_pg_dir[pgd_index(temp)].pgd;
24124- }
24125+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
24126+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24127+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
24128
24129 /*
24130 * After the lock is released, the original page table is restored.
24131 */
24132 __flush_tlb_all();
24133
15a11c5b
MT
24134+#ifdef CONFIG_PAX_KERNEXEC
24135+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
6e9df6a3 24136+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15a11c5b 24137+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
6e9df6a3 24138+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15a11c5b
MT
24139+#endif
24140+
24141 gdt_descr.address = __pa(get_cpu_gdt_table(0));
bc901d79
MT
24142 gdt_descr.size = GDT_SIZE - 1;
24143 load_gdt(&gdt_descr);
24144 }
24145
24146-void efi_call_phys_epilog(void)
24147+void __init efi_call_phys_epilog(void)
24148 {
24149- unsigned long cr4;
24150 struct desc_ptr gdt_descr;
24151
15a11c5b
MT
24152+#ifdef CONFIG_PAX_KERNEXEC
24153+ struct desc_struct d;
24154+
24155+ memset(&d, 0, sizeof d);
6e9df6a3
MT
24156+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24157+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15a11c5b
MT
24158+#endif
24159+
24160 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
bc901d79
MT
24161 gdt_descr.size = GDT_SIZE - 1;
24162 load_gdt(&gdt_descr);
24163
24164- cr4 = read_cr4_safe();
24165-
24166- if (cr4 & X86_CR4_PAE) {
24167- swapper_pg_dir[pgd_index(0)].pgd =
24168- efi_bak_pg_dir_pointer[0].pgd;
24169- } else {
24170- swapper_pg_dir[pgd_index(0)].pgd =
24171- efi_bak_pg_dir_pointer[0].pgd;
24172- swapper_pg_dir[pgd_index(0x400000)].pgd =
24173- efi_bak_pg_dir_pointer[1].pgd;
24174- }
24175+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
24176
24177 /*
24178 * After the lock is released, the original page table is restored.
fe2de317
MT
24179diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24180index fbe66e6..c5c0dd2 100644
24181--- a/arch/x86/platform/efi/efi_stub_32.S
24182+++ b/arch/x86/platform/efi/efi_stub_32.S
15a11c5b 24183@@ -6,7 +6,9 @@
bc901d79
MT
24184 */
24185
24186 #include <linux/linkage.h>
24187+#include <linux/init.h>
24188 #include <asm/page_types.h>
15a11c5b 24189+#include <asm/segment.h>
bc901d79
MT
24190
24191 /*
15a11c5b
MT
24192 * efi_call_phys(void *, ...) is a function with variable parameters.
24193@@ -20,7 +22,7 @@
bc901d79
MT
24194 * service functions will comply with gcc calling convention, too.
24195 */
24196
24197-.text
24198+__INIT
24199 ENTRY(efi_call_phys)
24200 /*
24201 * 0. The function can only be called in Linux kernel. So CS has been
15a11c5b 24202@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
bc901d79
MT
24203 * The mapping of lower virtual memory has been created in prelog and
24204 * epilog.
24205 */
24206- movl $1f, %edx
24207- subl $__PAGE_OFFSET, %edx
24208- jmp *%edx
15a11c5b
MT
24209+ movl $(__KERNEXEC_EFI_DS), %edx
24210+ mov %edx, %ds
24211+ mov %edx, %es
24212+ mov %edx, %ss
24213+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
bc901d79
MT
24214 1:
24215
24216 /*
15a11c5b 24217@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
bc901d79
MT
24218 * parameter 2, ..., param n. To make things easy, we save the return
24219 * address of efi_call_phys in a global variable.
24220 */
24221- popl %edx
24222- movl %edx, saved_return_addr
24223- /* get the function pointer into ECX*/
24224- popl %ecx
24225- movl %ecx, efi_rt_function_ptr
24226- movl $2f, %edx
24227- subl $__PAGE_OFFSET, %edx
24228- pushl %edx
24229+ popl (saved_return_addr)
24230+ popl (efi_rt_function_ptr)
24231
24232 /*
24233 * 3. Clear PG bit in %CR0.
15a11c5b 24234@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
bc901d79
MT
24235 /*
24236 * 5. Call the physical function.
24237 */
24238- jmp *%ecx
24239+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
24240
24241-2:
24242 /*
24243 * 6. After EFI runtime service returns, control will return to
24244 * following instruction. We'd better readjust stack pointer first.
15a11c5b 24245@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
bc901d79
MT
24246 movl %cr0, %edx
24247 orl $0x80000000, %edx
24248 movl %edx, %cr0
24249- jmp 1f
24250-1:
24251+
24252 /*
24253 * 8. Now restore the virtual mode from flat mode by
24254 * adding EIP with PAGE_OFFSET.
24255 */
24256- movl $1f, %edx
24257- jmp *%edx
15a11c5b 24258+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
bc901d79 24259 1:
15a11c5b
MT
24260+ movl $(__KERNEL_DS), %edx
24261+ mov %edx, %ds
24262+ mov %edx, %es
24263+ mov %edx, %ss
bc901d79
MT
24264
24265 /*
24266 * 9. Balance the stack. And because EAX contain the return value,
24267 * we'd better not clobber it.
24268 */
24269- leal efi_rt_function_ptr, %edx
24270- movl (%edx), %ecx
24271- pushl %ecx
24272+ pushl (efi_rt_function_ptr)
24273
24274 /*
24275- * 10. Push the saved return address onto the stack and return.
24276+ * 10. Return to the saved return address.
24277 */
24278- leal saved_return_addr, %edx
24279- movl (%edx), %ecx
24280- pushl %ecx
24281- ret
24282+ jmpl *(saved_return_addr)
24283 ENDPROC(efi_call_phys)
24284 .previous
24285
24286-.data
24287+__INITDATA
24288 saved_return_addr:
24289 .long 0
24290 efi_rt_function_ptr:
fe2de317
MT
24291diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24292index 4c07cca..2c8427d 100644
24293--- a/arch/x86/platform/efi/efi_stub_64.S
24294+++ b/arch/x86/platform/efi/efi_stub_64.S
6e9df6a3
MT
24295@@ -7,6 +7,7 @@
24296 */
24297
24298 #include <linux/linkage.h>
24299+#include <asm/alternative-asm.h>
24300
24301 #define SAVE_XMM \
24302 mov %rsp, %rax; \
24303@@ -40,6 +41,7 @@ ENTRY(efi_call0)
15a11c5b
MT
24304 call *%rdi
24305 addq $32, %rsp
24306 RESTORE_XMM
fe2de317 24307+ pax_force_retaddr 0, 1
15a11c5b
MT
24308 ret
24309 ENDPROC(efi_call0)
24310
6e9df6a3 24311@@ -50,6 +52,7 @@ ENTRY(efi_call1)
15a11c5b
MT
24312 call *%rdi
24313 addq $32, %rsp
24314 RESTORE_XMM
fe2de317 24315+ pax_force_retaddr 0, 1
15a11c5b
MT
24316 ret
24317 ENDPROC(efi_call1)
24318
6e9df6a3 24319@@ -60,6 +63,7 @@ ENTRY(efi_call2)
15a11c5b
MT
24320 call *%rdi
24321 addq $32, %rsp
24322 RESTORE_XMM
fe2de317 24323+ pax_force_retaddr 0, 1
15a11c5b
MT
24324 ret
24325 ENDPROC(efi_call2)
24326
6e9df6a3 24327@@ -71,6 +75,7 @@ ENTRY(efi_call3)
15a11c5b
MT
24328 call *%rdi
24329 addq $32, %rsp
24330 RESTORE_XMM
fe2de317 24331+ pax_force_retaddr 0, 1
15a11c5b
MT
24332 ret
24333 ENDPROC(efi_call3)
24334
6e9df6a3 24335@@ -83,6 +88,7 @@ ENTRY(efi_call4)
15a11c5b
MT
24336 call *%rdi
24337 addq $32, %rsp
24338 RESTORE_XMM
fe2de317 24339+ pax_force_retaddr 0, 1
15a11c5b
MT
24340 ret
24341 ENDPROC(efi_call4)
24342
6e9df6a3 24343@@ -96,6 +102,7 @@ ENTRY(efi_call5)
15a11c5b
MT
24344 call *%rdi
24345 addq $48, %rsp
24346 RESTORE_XMM
fe2de317 24347+ pax_force_retaddr 0, 1
15a11c5b
MT
24348 ret
24349 ENDPROC(efi_call5)
24350
6e9df6a3 24351@@ -112,5 +119,6 @@ ENTRY(efi_call6)
15a11c5b
MT
24352 call *%rdi
24353 addq $48, %rsp
24354 RESTORE_XMM
fe2de317 24355+ pax_force_retaddr 0, 1
15a11c5b
MT
24356 ret
24357 ENDPROC(efi_call6)
fe2de317
MT
24358diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24359index fe73276..70fe25a 100644
24360--- a/arch/x86/platform/mrst/mrst.c
24361+++ b/arch/x86/platform/mrst/mrst.c
15a11c5b
MT
24362@@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
24363 }
24364
24365 /* Reboot and power off are handled by the SCU on a MID device */
24366-static void mrst_power_off(void)
24367+static __noreturn void mrst_power_off(void)
24368 {
24369 intel_scu_ipc_simple_command(0xf1, 1);
24370+ BUG();
24371 }
24372
24373-static void mrst_reboot(void)
24374+static __noreturn void mrst_reboot(void)
24375 {
24376 intel_scu_ipc_simple_command(0xf1, 0);
24377+ BUG();
66a7e928
MT
24378 }
24379
15a11c5b 24380 /*
fe2de317
MT
24381diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
24382index 5b55219..b326540 100644
24383--- a/arch/x86/platform/uv/tlb_uv.c
24384+++ b/arch/x86/platform/uv/tlb_uv.c
24385@@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
6e9df6a3 24386 struct bau_control *smaster = bcp->socket_master;
66a7e928
MT
24387 struct reset_args reset_args;
24388
24389+ pax_track_stack();
24390+
24391 reset_args.sender = sender;
6e9df6a3 24392 cpus_clear(*mask);
15a11c5b 24393 /* find a single cpu for each uvhub in this distribution mask */
fe2de317
MT
24394diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24395index 87bb35e..eff2da8 100644
24396--- a/arch/x86/power/cpu.c
24397+++ b/arch/x86/power/cpu.c
6892158b 24398@@ -130,7 +130,7 @@ static void do_fpu_end(void)
24399 static void fix_processor_context(void)
24400 {
24401 int cpu = smp_processor_id();
24402- struct tss_struct *t = &per_cpu(init_tss, cpu);
24403+ struct tss_struct *t = init_tss + cpu;
24404
24405 set_tss_desc(cpu, t); /*
24406 * This just modifies memory; should not be
6892158b 24407@@ -140,7 +140,9 @@ static void fix_processor_context(void)
24408 */
24409
24410 #ifdef CONFIG_X86_64
ae4e228f 24411+ pax_open_kernel();
58c5fc13 24412 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
ae4e228f 24413+ pax_close_kernel();
58c5fc13 24414
24415 syscall_init(); /* This sets MSR_*STAR and related */
24416 #endif
24417diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24418index 5d17950..2253fc9 100644
24419--- a/arch/x86/vdso/Makefile
24420+++ b/arch/x86/vdso/Makefile
6e9df6a3 24421@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24422 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24423 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
58c5fc13 24424
ae4e228f 24425-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
71d190be 24426+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24427 GCOV_PROFILE := n
24428
24429 #
24430diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24431index 468d591..8e80a0a 100644
24432--- a/arch/x86/vdso/vdso32-setup.c
24433+++ b/arch/x86/vdso/vdso32-setup.c
24434@@ -25,6 +25,7 @@
24435 #include <asm/tlbflush.h>
24436 #include <asm/vdso.h>
24437 #include <asm/proto.h>
24438+#include <asm/mman.h>
24439
24440 enum {
24441 VDSO_DISABLED = 0,
fe2de317 24442@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24443 void enable_sep_cpu(void)
24444 {
24445 int cpu = get_cpu();
24446- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24447+ struct tss_struct *tss = init_tss + cpu;
24448
24449 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24450 put_cpu();
24451@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24452 gate_vma.vm_start = FIXADDR_USER_START;
24453 gate_vma.vm_end = FIXADDR_USER_END;
24454 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24455- gate_vma.vm_page_prot = __P101;
24456+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24457 /*
24458 * Make sure the vDSO gets into every core dump.
24459 * Dumping its contents makes post-mortem fully interpretable later
fe2de317 24460@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24461 if (compat)
24462 addr = VDSO_HIGH_BASE;
24463 else {
24464- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24465+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24466 if (IS_ERR_VALUE(addr)) {
24467 ret = addr;
24468 goto up_fail;
24469 }
24470 }
24471
24472- current->mm->context.vdso = (void *)addr;
24473+ current->mm->context.vdso = addr;
24474
24475 if (compat_uses_vma || !compat) {
24476 /*
fe2de317 24477@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24478 }
24479
24480 current_thread_info()->sysenter_return =
24481- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24482+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24483
24484 up_fail:
24485 if (ret)
24486- current->mm->context.vdso = NULL;
24487+ current->mm->context.vdso = 0;
24488
24489 up_write(&mm->mmap_sem);
24490
ae4e228f 24491@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24492
24493 const char *arch_vma_name(struct vm_area_struct *vma)
24494 {
24495- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24496+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24497 return "[vdso]";
24498+
24499+#ifdef CONFIG_PAX_SEGMEXEC
24500+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24501+ return "[vdso]";
24502+#endif
24503+
24504 return NULL;
24505 }
24506
fe2de317 24507@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24508 * Check to see if the corresponding task was created in compat vdso
24509 * mode.
24510 */
24511- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24512+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24513 return &gate_vma;
24514 return NULL;
24515 }
24516diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24517index 316fbca..4638633 100644
24518--- a/arch/x86/vdso/vma.c
24519+++ b/arch/x86/vdso/vma.c
6e9df6a3 24520@@ -16,8 +16,6 @@
15a11c5b 24521 #include <asm/vdso.h>
6e9df6a3 24522 #include <asm/page.h>
24523
24524-unsigned int __read_mostly vdso_enabled = 1;
24525-
24526 extern char vdso_start[], vdso_end[];
24527 extern unsigned short vdso_sync_cpuid;
15a11c5b 24528
fe2de317 24529@@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
6e9df6a3 24530 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
15a11c5b 24531 {
24532 struct mm_struct *mm = current->mm;
24533- unsigned long addr;
24534+ unsigned long addr = 0;
24535 int ret;
24536
24537- if (!vdso_enabled)
24538- return 0;
24539-
24540 down_write(&mm->mmap_sem);
24541+
24542+#ifdef CONFIG_PAX_RANDMMAP
24543+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24544+#endif
24545+
24546 addr = vdso_addr(mm->start_stack, vdso_size);
24547 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
15a11c5b 24548 if (IS_ERR_VALUE(addr)) {
fe2de317 24549@@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24550 goto up_fail;
24551 }
24552
24553- current->mm->context.vdso = (void *)addr;
6e9df6a3 24554+ mm->context.vdso = addr;
58c5fc13 24555
6e9df6a3 24556 ret = install_special_mapping(mm, addr, vdso_size,
58c5fc13 24557 VM_READ|VM_EXEC|
6e9df6a3 24558 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
58c5fc13 24559 VM_ALWAYSDUMP,
24560 vdso_pages);
24561- if (ret) {
58c5fc13 24562- current->mm->context.vdso = NULL;
24563- goto up_fail;
24564- }
24565+
24566+ if (ret)
24567+ mm->context.vdso = 0;
6e9df6a3 24568
15a11c5b 24569 up_fail:
24570 up_write(&mm->mmap_sem);
24571 return ret;
24572 }
24573-
24574-static __init int vdso_setup(char *s)
24575-{
24576- vdso_enabled = simple_strtoul(s, NULL, 0);
24577- return 0;
24578-}
24579-__setup("vdso=", vdso_setup);
24580diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24581index 46c8069..6330d3c 100644
24582--- a/arch/x86/xen/enlighten.c
24583+++ b/arch/x86/xen/enlighten.c
bc901d79 24584@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24585
24586 struct shared_info xen_dummy_shared_info;
24587
24588-void *xen_initial_gdt;
24589-
24590 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24591 __read_mostly int xen_have_vector_callback;
24592 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
fe2de317 24593@@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24594 #endif
24595 };
24596
24597-static void xen_reboot(int reason)
24598+static __noreturn void xen_reboot(int reason)
24599 {
24600 struct sched_shutdown r = { .reason = reason };
24601
6e9df6a3 24602@@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
24603 BUG();
24604 }
24605
24606-static void xen_restart(char *msg)
24607+static __noreturn void xen_restart(char *msg)
24608 {
24609 xen_reboot(SHUTDOWN_reboot);
24610 }
24611
24612-static void xen_emergency_restart(void)
24613+static __noreturn void xen_emergency_restart(void)
24614 {
24615 xen_reboot(SHUTDOWN_reboot);
24616 }
24617
24618-static void xen_machine_halt(void)
24619+static __noreturn void xen_machine_halt(void)
24620 {
24621 xen_reboot(SHUTDOWN_poweroff);
24622 }
fe2de317 24623@@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(void)
24624 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24625
24626 /* Work out if we support NX */
24627- x86_configure_nx();
24628+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24629+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
57199397 24630+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24631+ unsigned l, h;
24632+
24633+ __supported_pte_mask |= _PAGE_NX;
24634+ rdmsr(MSR_EFER, l, h);
24635+ l |= EFER_NX;
24636+ wrmsr(MSR_EFER, l, h);
24637+ }
24638+#endif
24639
24640 xen_setup_features();
24641
fe2de317 24642@@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(void)
24643
24644 machine_ops = xen_machine_ops;
24645
24646- /*
24647- * The only reliable way to retain the initial address of the
24648- * percpu gdt_page is to remember it here, so we can go and
24649- * mark it RW later, when the initial percpu area is freed.
24650- */
24651- xen_initial_gdt = &per_cpu(gdt_page, 0);
24652-
24653 xen_smp_init();
24654
16454cff 24655 #ifdef CONFIG_ACPI_NUMA
24656diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24657index 3dd53f9..5aa5df3 100644
24658--- a/arch/x86/xen/mmu.c
24659+++ b/arch/x86/xen/mmu.c
24660@@ -1768,6 +1768,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24661 convert_pfn_mfn(init_level4_pgt);
24662 convert_pfn_mfn(level3_ident_pgt);
24663 convert_pfn_mfn(level3_kernel_pgt);
24664+ convert_pfn_mfn(level3_vmalloc_start_pgt);
24665+ convert_pfn_mfn(level3_vmalloc_end_pgt);
24666+ convert_pfn_mfn(level3_vmemmap_pgt);
24667
24668 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24669 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
fe2de317 24670@@ -1786,7 +1789,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24671 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24672 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24673 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24674+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24675+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24676+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24677 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24678+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24679 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24680 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24681
fe2de317 24682@@ -2000,6 +2007,7 @@ static void __init xen_post_allocator_init(void)
24683 pv_mmu_ops.set_pud = xen_set_pud;
24684 #if PAGETABLE_LEVELS == 4
24685 pv_mmu_ops.set_pgd = xen_set_pgd;
24686+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24687 #endif
6892158b 24688
15a11c5b 24689 /* This will work as long as patching hasn't happened yet
fe2de317 24690@@ -2081,6 +2089,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24691 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24692 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24693 .set_pgd = xen_set_pgd_hyper,
24694+ .set_pgd_batched = xen_set_pgd_hyper,
6892158b 24695
24696 .alloc_pud = xen_alloc_pmd_init,
24697 .release_pud = xen_release_pmd_init,
24698diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24699index 041d4fe..7666b7e 100644
24700--- a/arch/x86/xen/smp.c
24701+++ b/arch/x86/xen/smp.c
24702@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24703 {
24704 BUG_ON(smp_processor_id() != 0);
24705 native_smp_prepare_boot_cpu();
24706-
24707- /* We've switched to the "real" per-cpu gdt, so make sure the
24708- old memory can be recycled */
24709- make_lowmem_page_readwrite(xen_initial_gdt);
24710-
bc901d79 24711 xen_filter_cpu_maps();
24712 xen_setup_vcpu_info_placement();
24713 }
fe2de317 24714@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24715 gdt = get_cpu_gdt_table(cpu);
24716
24717 ctxt->flags = VGCF_IN_KERNEL;
24718- ctxt->user_regs.ds = __USER_DS;
24719- ctxt->user_regs.es = __USER_DS;
24720+ ctxt->user_regs.ds = __KERNEL_DS;
24721+ ctxt->user_regs.es = __KERNEL_DS;
24722 ctxt->user_regs.ss = __KERNEL_DS;
24723 #ifdef CONFIG_X86_32
24724 ctxt->user_regs.fs = __KERNEL_PERCPU;
24725- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24726+ savesegment(gs, ctxt->user_regs.gs);
24727 #else
24728 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24729 #endif
fe2de317 24730@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24731 int rc;
24732
24733 per_cpu(current_task, cpu) = idle;
24734+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24735 #ifdef CONFIG_X86_32
24736 irq_ctx_init(cpu);
24737 #else
24738 clear_tsk_thread_flag(idle, TIF_FORK);
24739- per_cpu(kernel_stack, cpu) =
24740- (unsigned long)task_stack_page(idle) -
24741- KERNEL_STACK_OFFSET + THREAD_SIZE;
66a7e928 24742+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24743 #endif
24744 xen_setup_runstate_info(cpu);
24745 xen_setup_timer(cpu);
24746diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24747index b040b0e..8cc4fe0 100644
24748--- a/arch/x86/xen/xen-asm_32.S
24749+++ b/arch/x86/xen/xen-asm_32.S
24750@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24751 ESP_OFFSET=4 # bytes pushed onto stack
24752
24753 /*
24754- * Store vcpu_info pointer for easy access. Do it this way to
24755- * avoid having to reload %fs
24756+ * Store vcpu_info pointer for easy access.
24757 */
24758 #ifdef CONFIG_SMP
24759- GET_THREAD_INFO(%eax)
24760- movl TI_cpu(%eax), %eax
24761- movl __per_cpu_offset(,%eax,4), %eax
24762- mov xen_vcpu(%eax), %eax
24763+ push %fs
24764+ mov $(__KERNEL_PERCPU), %eax
24765+ mov %eax, %fs
24766+ mov PER_CPU_VAR(xen_vcpu), %eax
24767+ pop %fs
24768 #else
24769 movl xen_vcpu, %eax
24770 #endif
24771diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24772index aaa7291..3f77960 100644
24773--- a/arch/x86/xen/xen-head.S
24774+++ b/arch/x86/xen/xen-head.S
24775@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24776 #ifdef CONFIG_X86_32
24777 mov %esi,xen_start_info
24778 mov $init_thread_union+THREAD_SIZE,%esp
24779+#ifdef CONFIG_SMP
24780+ movl $cpu_gdt_table,%edi
24781+ movl $__per_cpu_load,%eax
24782+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24783+ rorl $16,%eax
24784+ movb %al,__KERNEL_PERCPU + 4(%edi)
24785+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24786+ movl $__per_cpu_end - 1,%eax
24787+ subl $__per_cpu_start,%eax
24788+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24789+#endif
24790 #else
24791 mov %rsi,xen_start_info
24792 mov $init_thread_union+THREAD_SIZE,%rsp
24793diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24794index b095739..8c17bcd 100644
24795--- a/arch/x86/xen/xen-ops.h
24796+++ b/arch/x86/xen/xen-ops.h
24797@@ -10,8 +10,6 @@
24798 extern const char xen_hypervisor_callback[];
24799 extern const char xen_failsafe_callback[];
24800
24801-extern void *xen_initial_gdt;
24802-
24803 struct trap_info;
24804 void xen_copy_trap_info(struct trap_info *traps);
24805
24806diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24807index 58916af..9cb880b 100644
24808--- a/block/blk-iopoll.c
24809+++ b/block/blk-iopoll.c
24810@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24811 }
24812 EXPORT_SYMBOL(blk_iopoll_complete);
24813
24814-static void blk_iopoll_softirq(struct softirq_action *h)
24815+static void blk_iopoll_softirq(void)
24816 {
24817 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24818 int rearm = 0, budget = blk_iopoll_budget;
24819diff --git a/block/blk-map.c b/block/blk-map.c
24820index 164cd00..6d96fc1 100644
24821--- a/block/blk-map.c
24822+++ b/block/blk-map.c
24823@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24824 if (!len || !kbuf)
24825 return -EINVAL;
58c5fc13 24826
24827- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24828+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24829 if (do_copy)
24830 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24831 else
24832diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24833index 1366a89..e17f54b 100644
24834--- a/block/blk-softirq.c
24835+++ b/block/blk-softirq.c
24836@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24837 * Softirq action handler - move entries to local list and loop over them
24838 * while passing them to the queue registered handler.
24839 */
24840-static void blk_done_softirq(struct softirq_action *h)
24841+static void blk_done_softirq(void)
24842 {
24843 struct list_head *cpu_list, local_list;
58c5fc13 24844
24845diff --git a/block/bsg.c b/block/bsg.c
24846index 702f131..37808bf 100644
24847--- a/block/bsg.c
24848+++ b/block/bsg.c
24849@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24850 struct sg_io_v4 *hdr, struct bsg_device *bd,
24851 fmode_t has_write_perm)
24852 {
24853+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24854+ unsigned char *cmdptr;
24855+
24856 if (hdr->request_len > BLK_MAX_CDB) {
24857 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24858 if (!rq->cmd)
24859 return -ENOMEM;
24860- }
24861+ cmdptr = rq->cmd;
24862+ } else
24863+ cmdptr = tmpcmd;
24864
24865- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24866+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24867 hdr->request_len))
24868 return -EFAULT;
24869
24870+ if (cmdptr != rq->cmd)
24871+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24872+
24873 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24874 if (blk_verify_command(rq->cmd, has_write_perm))
24875 return -EPERM;
24876diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24877index 7b72502..646105c 100644
24878--- a/block/compat_ioctl.c
24879+++ b/block/compat_ioctl.c
24880@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24881 err |= __get_user(f->spec1, &uf->spec1);
24882 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24883 err |= __get_user(name, &uf->name);
24884- f->name = compat_ptr(name);
24885+ f->name = (void __force_kernel *)compat_ptr(name);
24886 if (err) {
24887 err = -EFAULT;
24888 goto out;
24889diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24890index 4f4230b..0feae9a 100644
24891--- a/block/scsi_ioctl.c
24892+++ b/block/scsi_ioctl.c
24893@@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
24894 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24895 struct sg_io_hdr *hdr, fmode_t mode)
24896 {
24897- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24898+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24899+ unsigned char *cmdptr;
24900+
24901+ if (rq->cmd != rq->__cmd)
24902+ cmdptr = rq->cmd;
24903+ else
24904+ cmdptr = tmpcmd;
24905+
24906+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24907 return -EFAULT;
24908+
24909+ if (cmdptr != rq->cmd)
24910+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24911+
24912 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24913 return -EPERM;
24914
fe2de317 24915@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24916 int err;
24917 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24918 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24919+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24920+ unsigned char *cmdptr;
24921
24922 if (!sic)
24923 return -EINVAL;
fe2de317 24924@@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24925 */
24926 err = -EFAULT;
24927 rq->cmd_len = cmdlen;
24928- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24929+
24930+ if (rq->cmd != rq->__cmd)
24931+ cmdptr = rq->cmd;
24932+ else
24933+ cmdptr = tmpcmd;
24934+
24935+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24936 goto error;
24937
24938+ if (rq->cmd != cmdptr)
24939+ memcpy(rq->cmd, cmdptr, cmdlen);
24940+
24941 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24942 goto error;
24943
24944diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24945index 671d4d6..5f24030 100644
24946--- a/crypto/cryptd.c
24947+++ b/crypto/cryptd.c
24948@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24949
24950 struct cryptd_blkcipher_request_ctx {
24951 crypto_completion_t complete;
24952-};
24953+} __no_const;
24954
24955 struct cryptd_hash_ctx {
24956 struct crypto_shash *child;
24957@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24958
24959 struct cryptd_aead_request_ctx {
24960 crypto_completion_t complete;
24961-};
24962+} __no_const;
24963
24964 static void cryptd_queue_worker(struct work_struct *work);
24965
24966diff --git a/crypto/serpent.c b/crypto/serpent.c
24967index b651a55..a9ddd79b 100644
24968--- a/crypto/serpent.c
24969+++ b/crypto/serpent.c
24970@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
24971 u32 r0,r1,r2,r3,r4;
24972 int i;
24973
24974+ pax_track_stack();
24975+
24976 /* Copy key, add padding */
24977
24978 for (i = 0; i < keylen; ++i)
24979diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
24980index 5d41894..22021e4 100644
24981--- a/drivers/acpi/apei/cper.c
24982+++ b/drivers/acpi/apei/cper.c
66a7e928 24983@@ -38,12 +38,12 @@
24984 */
24985 u64 cper_next_record_id(void)
24986 {
24987- static atomic64_t seq;
24988+ static atomic64_unchecked_t seq;
24989
24990- if (!atomic64_read(&seq))
24991- atomic64_set(&seq, ((u64)get_seconds()) << 32);
24992+ if (!atomic64_read_unchecked(&seq))
24993+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
24994
24995- return atomic64_inc_return(&seq);
24996+ return atomic64_inc_return_unchecked(&seq);
24997 }
24998 EXPORT_SYMBOL_GPL(cper_next_record_id);
24999
25000diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25001index 22f918b..9fafb84 100644
25002--- a/drivers/acpi/ec_sys.c
25003+++ b/drivers/acpi/ec_sys.c
25004@@ -11,6 +11,7 @@
25005 #include <linux/kernel.h>
25006 #include <linux/acpi.h>
25007 #include <linux/debugfs.h>
25008+#include <asm/uaccess.h>
25009 #include "internal.h"
ae4e228f 25010
15a11c5b 25011 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
fe2de317 25012@@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25013 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25014 */
25015 unsigned int size = EC_SPACE_SIZE;
25016- u8 *data = (u8 *) buf;
25017+ u8 data;
25018 loff_t init_off = *off;
25019 int err = 0;
ae4e228f 25020
fe2de317 25021@@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
15a11c5b 25022 size = count;
58c5fc13 25023
25024 while (size) {
25025- err = ec_read(*off, &data[*off - init_off]);
25026+ err = ec_read(*off, &data);
25027 if (err)
25028 return err;
25029+ if (put_user(data, &buf[*off - init_off]))
25030+ return -EFAULT;
25031 *off += 1;
25032 size--;
25033 }
fe2de317 25034@@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
66a7e928 25035
25036 unsigned int size = count;
25037 loff_t init_off = *off;
25038- u8 *data = (u8 *) buf;
25039 int err = 0;
df50ba0c 25040
15a11c5b 25041 if (*off >= EC_SPACE_SIZE)
fe2de317 25042@@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
15a11c5b 25043 }
df50ba0c 25044
25045 while (size) {
25046- u8 byte_write = data[*off - init_off];
25047+ u8 byte_write;
25048+ if (get_user(byte_write, &buf[*off - init_off]))
25049+ return -EFAULT;
25050 err = ec_write(*off, byte_write);
25051 if (err)
25052 return err;
25053diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25054index f5f9869..da87aeb 100644
25055--- a/drivers/acpi/proc.c
25056+++ b/drivers/acpi/proc.c
25057@@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct file *file,
25058 size_t count, loff_t * ppos)
25059 {
25060 struct list_head *node, *next;
25061- char strbuf[5];
25062- char str[5] = "";
25063- unsigned int len = count;
25064+ char strbuf[5] = {0};
25065
25066- if (len > 4)
25067- len = 4;
25068- if (len < 0)
25069+ if (count > 4)
25070+ count = 4;
25071+ if (copy_from_user(strbuf, buffer, count))
25072 return -EFAULT;
25073-
25074- if (copy_from_user(strbuf, buffer, len))
25075- return -EFAULT;
25076- strbuf[len] = '\0';
25077- sscanf(strbuf, "%s", str);
25078+ strbuf[count] = '\0';
25079
25080 mutex_lock(&acpi_device_lock);
25081 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
fe2de317 25082@@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct file *file,
25083 if (!dev->wakeup.flags.valid)
25084 continue;
25085
25086- if (!strncmp(dev->pnp.bus_id, str, 4)) {
25087+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25088 if (device_can_wakeup(&dev->dev)) {
25089 bool enable = !device_may_wakeup(&dev->dev);
25090 device_set_wakeup_enable(&dev->dev, enable);
25091diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25092index a4e0f1b..9793b28 100644
25093--- a/drivers/acpi/processor_driver.c
25094+++ b/drivers/acpi/processor_driver.c
25095@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
58c5fc13 25096 return 0;
57199397 25097 #endif
25098
25099- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25100+ BUG_ON(pr->id >= nr_cpu_ids);
25101
25102 /*
25103 * Buggy BIOS check
25104diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25105index 4a3a5ae..cbee192 100644
25106--- a/drivers/ata/libata-core.c
25107+++ b/drivers/ata/libata-core.c
25108@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25109 struct ata_port *ap;
25110 unsigned int tag;
25111
25112- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25113+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25114 ap = qc->ap;
25115
25116 qc->flags = 0;
fe2de317 25117@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25118 struct ata_port *ap;
25119 struct ata_link *link;
25120
25121- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25122+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25123 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25124 ap = qc->ap;
25125 link = qc->dev->link;
fe2de317 25126@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25127 return;
25128
25129 spin_lock(&lock);
25130+ pax_open_kernel();
25131
25132 for (cur = ops->inherits; cur; cur = cur->inherits) {
25133 void **inherit = (void **)cur;
fe2de317 25134@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25135 if (IS_ERR(*pp))
25136 *pp = NULL;
25137
25138- ops->inherits = NULL;
15a11c5b 25139+ *(struct ata_port_operations **)&ops->inherits = NULL;
25140
25141+ pax_close_kernel();
25142 spin_unlock(&lock);
25143 }
25144
25145diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
25146index ed16fbe..fc92cb8 100644
25147--- a/drivers/ata/libata-eh.c
25148+++ b/drivers/ata/libata-eh.c
6e9df6a3 25149@@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
25150 {
25151 struct ata_link *link;
25152
25153+ pax_track_stack();
25154+
25155 ata_for_each_link(link, ap, HOST_FIRST)
25156 ata_eh_link_report(link);
25157 }
25158diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25159index 719bb73..79ce858 100644
25160--- a/drivers/ata/pata_arasan_cf.c
25161+++ b/drivers/ata/pata_arasan_cf.c
25162@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25163 /* Handle platform specific quirks */
25164 if (pdata->quirk) {
25165 if (pdata->quirk & CF_BROKEN_PIO) {
25166- ap->ops->set_piomode = NULL;
25167+ pax_open_kernel();
15a11c5b 25168+ *(void **)&ap->ops->set_piomode = NULL;
25169+ pax_close_kernel();
25170 ap->pio_mask = 0;
25171 }
25172 if (pdata->quirk & CF_BROKEN_MWDMA)
25173diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25174index f9b983a..887b9d8 100644
25175--- a/drivers/atm/adummy.c
25176+++ b/drivers/atm/adummy.c
25177@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25178 vcc->pop(vcc, skb);
25179 else
25180 dev_kfree_skb_any(skb);
25181- atomic_inc(&vcc->stats->tx);
25182+ atomic_inc_unchecked(&vcc->stats->tx);
ae4e228f 25183
25184 return 0;
25185 }
25186diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25187index f8f41e0..1f987dd 100644
25188--- a/drivers/atm/ambassador.c
25189+++ b/drivers/atm/ambassador.c
25190@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25191 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25192
25193 // VC layer stats
25194- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25195+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25196
25197 // free the descriptor
25198 kfree (tx_descr);
fe2de317 25199@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25200 dump_skb ("<<<", vc, skb);
25201
25202 // VC layer stats
25203- atomic_inc(&atm_vcc->stats->rx);
25204+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25205 __net_timestamp(skb);
25206 // end of our responsibility
25207 atm_vcc->push (atm_vcc, skb);
fe2de317 25208@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25209 } else {
25210 PRINTK (KERN_INFO, "dropped over-size frame");
25211 // should we count this?
25212- atomic_inc(&atm_vcc->stats->rx_drop);
25213+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25214 }
25215
25216 } else {
fe2de317 25217@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25218 }
25219
25220 if (check_area (skb->data, skb->len)) {
25221- atomic_inc(&atm_vcc->stats->tx_err);
25222+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25223 return -ENOMEM; // ?
25224 }
25225
25226diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25227index b22d71c..d6e1049 100644
25228--- a/drivers/atm/atmtcp.c
25229+++ b/drivers/atm/atmtcp.c
25230@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25231 if (vcc->pop) vcc->pop(vcc,skb);
25232 else dev_kfree_skb(skb);
25233 if (dev_data) return 0;
25234- atomic_inc(&vcc->stats->tx_err);
25235+ atomic_inc_unchecked(&vcc->stats->tx_err);
25236 return -ENOLINK;
25237 }
25238 size = skb->len+sizeof(struct atmtcp_hdr);
fe2de317 25239@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25240 if (!new_skb) {
25241 if (vcc->pop) vcc->pop(vcc,skb);
25242 else dev_kfree_skb(skb);
25243- atomic_inc(&vcc->stats->tx_err);
25244+ atomic_inc_unchecked(&vcc->stats->tx_err);
25245 return -ENOBUFS;
25246 }
25247 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
fe2de317 25248@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25249 if (vcc->pop) vcc->pop(vcc,skb);
25250 else dev_kfree_skb(skb);
25251 out_vcc->push(out_vcc,new_skb);
25252- atomic_inc(&vcc->stats->tx);
25253- atomic_inc(&out_vcc->stats->rx);
25254+ atomic_inc_unchecked(&vcc->stats->tx);
25255+ atomic_inc_unchecked(&out_vcc->stats->rx);
25256 return 0;
25257 }
ae4e228f 25258
fe2de317 25259@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25260 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25261 read_unlock(&vcc_sklist_lock);
25262 if (!out_vcc) {
25263- atomic_inc(&vcc->stats->tx_err);
25264+ atomic_inc_unchecked(&vcc->stats->tx_err);
25265 goto done;
25266 }
25267 skb_pull(skb,sizeof(struct atmtcp_hdr));
fe2de317 25268@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25269 __net_timestamp(new_skb);
25270 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25271 out_vcc->push(out_vcc,new_skb);
25272- atomic_inc(&vcc->stats->tx);
25273- atomic_inc(&out_vcc->stats->rx);
25274+ atomic_inc_unchecked(&vcc->stats->tx);
25275+ atomic_inc_unchecked(&out_vcc->stats->rx);
25276 done:
25277 if (vcc->pop) vcc->pop(vcc,skb);
25278 else dev_kfree_skb(skb);
25279diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25280index 9307141..d8521bf 100644
25281--- a/drivers/atm/eni.c
25282+++ b/drivers/atm/eni.c
25283@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25284 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25285 vcc->dev->number);
25286 length = 0;
25287- atomic_inc(&vcc->stats->rx_err);
25288+ atomic_inc_unchecked(&vcc->stats->rx_err);
25289 }
25290 else {
25291 length = ATM_CELL_SIZE-1; /* no HEC */
25292@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25293 size);
25294 }
25295 eff = length = 0;
25296- atomic_inc(&vcc->stats->rx_err);
25297+ atomic_inc_unchecked(&vcc->stats->rx_err);
25298 }
25299 else {
25300 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25301@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25302 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25303 vcc->dev->number,vcc->vci,length,size << 2,descr);
25304 length = eff = 0;
25305- atomic_inc(&vcc->stats->rx_err);
25306+ atomic_inc_unchecked(&vcc->stats->rx_err);
25307 }
25308 }
25309 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25310@@ -771,7 +771,7 @@ rx_dequeued++;
25311 vcc->push(vcc,skb);
25312 pushed++;
25313 }
25314- atomic_inc(&vcc->stats->rx);
25315+ atomic_inc_unchecked(&vcc->stats->rx);
25316 }
25317 wake_up(&eni_dev->rx_wait);
25318 }
fe2de317 25319@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *dev)
25320 PCI_DMA_TODEVICE);
25321 if (vcc->pop) vcc->pop(vcc,skb);
25322 else dev_kfree_skb_irq(skb);
25323- atomic_inc(&vcc->stats->tx);
25324+ atomic_inc_unchecked(&vcc->stats->tx);
25325 wake_up(&eni_dev->tx_wait);
25326 dma_complete++;
25327 }
25328@@ -1568,7 +1568,7 @@ tx_complete++;
25329 /*--------------------------------- entries ---------------------------------*/
25330
25331
25332-static const char *media_name[] __devinitdata = {
25333+static const char *media_name[] __devinitconst = {
25334 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25335 "UTP", "05?", "06?", "07?", /* 4- 7 */
25336 "TAXI","09?", "10?", "11?", /* 8-11 */
25337diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25338index 5072f8a..fa52520 100644
25339--- a/drivers/atm/firestream.c
25340+++ b/drivers/atm/firestream.c
25341@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25342 }
25343 }
ae4e228f 25344
25345- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25346+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
ae4e228f 25347
25348 fs_dprintk (FS_DEBUG_TXMEM, "i");
25349 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
fe2de317 25350@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25351 #endif
25352 skb_put (skb, qe->p1 & 0xffff);
25353 ATM_SKB(skb)->vcc = atm_vcc;
25354- atomic_inc(&atm_vcc->stats->rx);
25355+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25356 __net_timestamp(skb);
25357 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25358 atm_vcc->push (atm_vcc, skb);
fe2de317 25359@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25360 kfree (pe);
25361 }
25362 if (atm_vcc)
25363- atomic_inc(&atm_vcc->stats->rx_drop);
25364+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25365 break;
25366 case 0x1f: /* Reassembly abort: no buffers. */
25367 /* Silently increment error counter. */
25368 if (atm_vcc)
25369- atomic_inc(&atm_vcc->stats->rx_drop);
25370+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25371 break;
25372 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25373 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25374diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25375index 361f5ae..7fc552d 100644
25376--- a/drivers/atm/fore200e.c
25377+++ b/drivers/atm/fore200e.c
25378@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25379 #endif
25380 /* check error condition */
25381 if (*entry->status & STATUS_ERROR)
25382- atomic_inc(&vcc->stats->tx_err);
25383+ atomic_inc_unchecked(&vcc->stats->tx_err);
25384 else
25385- atomic_inc(&vcc->stats->tx);
25386+ atomic_inc_unchecked(&vcc->stats->tx);
25387 }
25388 }
ae4e228f 25389
fe2de317 25390@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25391 if (skb == NULL) {
25392 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
ae4e228f 25393
25394- atomic_inc(&vcc->stats->rx_drop);
25395+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25396 return -ENOMEM;
25397 }
ae4e228f 25398
fe2de317 25399@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
ae4e228f 25400
15a11c5b 25401 dev_kfree_skb_any(skb);
ae4e228f 25402
25403- atomic_inc(&vcc->stats->rx_drop);
25404+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25405 return -ENOMEM;
25406 }
ae4e228f 25407
15a11c5b 25408 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
ae4e228f 25409
25410 vcc->push(vcc, skb);
25411- atomic_inc(&vcc->stats->rx);
25412+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 25413
15a11c5b 25414 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
ae4e228f 25415
fe2de317 25416@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25417 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25418 fore200e->atm_dev->number,
25419 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25420- atomic_inc(&vcc->stats->rx_err);
25421+ atomic_inc_unchecked(&vcc->stats->rx_err);
25422 }
25423 }
ae4e228f 25424
fe2de317 25425@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25426 goto retry_here;
25427 }
ae4e228f 25428
25429- atomic_inc(&vcc->stats->tx_err);
25430+ atomic_inc_unchecked(&vcc->stats->tx_err);
ae4e228f 25431
25432 fore200e->tx_sat++;
25433 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25434diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25435index 9a51df4..f3bb5f8 100644
25436--- a/drivers/atm/he.c
25437+++ b/drivers/atm/he.c
25438@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
ae4e228f 25439
25440 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25441 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25442- atomic_inc(&vcc->stats->rx_drop);
25443+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25444 goto return_host_buffers;
25445 }
ae4e228f 25446
fe2de317 25447@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25448 RBRQ_LEN_ERR(he_dev->rbrq_head)
25449 ? "LEN_ERR" : "",
25450 vcc->vpi, vcc->vci);
25451- atomic_inc(&vcc->stats->rx_err);
25452+ atomic_inc_unchecked(&vcc->stats->rx_err);
25453 goto return_host_buffers;
25454 }
ae4e228f 25455
fe2de317 25456@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25457 vcc->push(vcc, skb);
25458 spin_lock(&he_dev->global_lock);
ae4e228f 25459
25460- atomic_inc(&vcc->stats->rx);
25461+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 25462
15a11c5b
MT
25463 return_host_buffers:
25464 ++pdus_assembled;
fe2de317 25465@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25466 tpd->vcc->pop(tpd->vcc, tpd->skb);
25467 else
25468 dev_kfree_skb_any(tpd->skb);
25469- atomic_inc(&tpd->vcc->stats->tx_err);
25470+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25471 }
25472 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25473 return;
fe2de317 25474@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25475 vcc->pop(vcc, skb);
25476 else
25477 dev_kfree_skb_any(skb);
25478- atomic_inc(&vcc->stats->tx_err);
25479+ atomic_inc_unchecked(&vcc->stats->tx_err);
25480 return -EINVAL;
25481 }
ae4e228f 25482
fe2de317 25483@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25484 vcc->pop(vcc, skb);
25485 else
25486 dev_kfree_skb_any(skb);
25487- atomic_inc(&vcc->stats->tx_err);
25488+ atomic_inc_unchecked(&vcc->stats->tx_err);
25489 return -EINVAL;
ae4e228f 25490 }
15a11c5b 25491 #endif
fe2de317 25492@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25493 vcc->pop(vcc, skb);
25494 else
25495 dev_kfree_skb_any(skb);
25496- atomic_inc(&vcc->stats->tx_err);
25497+ atomic_inc_unchecked(&vcc->stats->tx_err);
25498 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25499 return -ENOMEM;
25500 }
fe2de317 25501@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25502 vcc->pop(vcc, skb);
25503 else
25504 dev_kfree_skb_any(skb);
25505- atomic_inc(&vcc->stats->tx_err);
25506+ atomic_inc_unchecked(&vcc->stats->tx_err);
25507 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25508 return -ENOMEM;
25509 }
fe2de317 25510@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25511 __enqueue_tpd(he_dev, tpd, cid);
25512 spin_unlock_irqrestore(&he_dev->global_lock, flags);
ae4e228f 25513
25514- atomic_inc(&vcc->stats->tx);
25515+ atomic_inc_unchecked(&vcc->stats->tx);
ae4e228f 25516
25517 return 0;
25518 }
25519diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25520index b812103..e391a49 100644
25521--- a/drivers/atm/horizon.c
25522+++ b/drivers/atm/horizon.c
25523@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25524 {
25525 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25526 // VC layer stats
25527- atomic_inc(&vcc->stats->rx);
25528+ atomic_inc_unchecked(&vcc->stats->rx);
25529 __net_timestamp(skb);
25530 // end of our responsibility
25531 vcc->push (vcc, skb);
fe2de317 25532@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25533 dev->tx_iovec = NULL;
25534
25535 // VC layer stats
25536- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25537+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25538
25539 // free the skb
25540 hrz_kfree_skb (skb);
25541diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25542index db06f34..dcebb61 100644
25543--- a/drivers/atm/idt77252.c
25544+++ b/drivers/atm/idt77252.c
25545@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25546 else
25547 dev_kfree_skb(skb);
ae4e228f 25548
25549- atomic_inc(&vcc->stats->tx);
25550+ atomic_inc_unchecked(&vcc->stats->tx);
25551 }
ae4e228f 25552
15a11c5b 25553 atomic_dec(&scq->used);
fe2de317 25554@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25555 if ((sb = dev_alloc_skb(64)) == NULL) {
25556 printk("%s: Can't allocate buffers for aal0.\n",
25557 card->name);
25558- atomic_add(i, &vcc->stats->rx_drop);
25559+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25560 break;
25561 }
25562 if (!atm_charge(vcc, sb->truesize)) {
25563 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25564 card->name);
25565- atomic_add(i - 1, &vcc->stats->rx_drop);
25566+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25567 dev_kfree_skb(sb);
25568 break;
25569 }
fe2de317 25570@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25571 ATM_SKB(sb)->vcc = vcc;
25572 __net_timestamp(sb);
25573 vcc->push(vcc, sb);
25574- atomic_inc(&vcc->stats->rx);
25575+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 25576
25577 cell += ATM_CELL_PAYLOAD;
25578 }
fe2de317 25579@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25580 "(CDC: %08x)\n",
25581 card->name, len, rpp->len, readl(SAR_REG_CDC));
25582 recycle_rx_pool_skb(card, rpp);
25583- atomic_inc(&vcc->stats->rx_err);
25584+ atomic_inc_unchecked(&vcc->stats->rx_err);
25585 return;
25586 }
25587 if (stat & SAR_RSQE_CRC) {
25588 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25589 recycle_rx_pool_skb(card, rpp);
25590- atomic_inc(&vcc->stats->rx_err);
25591+ atomic_inc_unchecked(&vcc->stats->rx_err);
25592 return;
25593 }
25594 if (skb_queue_len(&rpp->queue) > 1) {
fe2de317 25595@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25596 RXPRINTK("%s: Can't alloc RX skb.\n",
25597 card->name);
25598 recycle_rx_pool_skb(card, rpp);
25599- atomic_inc(&vcc->stats->rx_err);
25600+ atomic_inc_unchecked(&vcc->stats->rx_err);
25601 return;
25602 }
25603 if (!atm_charge(vcc, skb->truesize)) {
fe2de317 25604@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b 25605 __net_timestamp(skb);
ae4e228f 25606
25607 vcc->push(vcc, skb);
25608- atomic_inc(&vcc->stats->rx);
25609+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 25610
25611 return;
25612 }
fe2de317 25613@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b 25614 __net_timestamp(skb);
ae4e228f 25615
25616 vcc->push(vcc, skb);
25617- atomic_inc(&vcc->stats->rx);
25618+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 25619
25620 if (skb->truesize > SAR_FB_SIZE_3)
25621 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
fe2de317 25622@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25623 if (vcc->qos.aal != ATM_AAL0) {
25624 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25625 card->name, vpi, vci);
25626- atomic_inc(&vcc->stats->rx_drop);
25627+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25628 goto drop;
25629 }
25630
25631 if ((sb = dev_alloc_skb(64)) == NULL) {
25632 printk("%s: Can't allocate buffers for AAL0.\n",
25633 card->name);
25634- atomic_inc(&vcc->stats->rx_err);
25635+ atomic_inc_unchecked(&vcc->stats->rx_err);
25636 goto drop;
25637 }
ae4e228f 25638
fe2de317 25639@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25640 ATM_SKB(sb)->vcc = vcc;
25641 __net_timestamp(sb);
25642 vcc->push(vcc, sb);
25643- atomic_inc(&vcc->stats->rx);
25644+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 25645
25646 drop:
25647 skb_pull(queue, 64);
fe2de317 25648@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
58c5fc13 25649
25650 if (vc == NULL) {
25651 printk("%s: NULL connection in send().\n", card->name);
25652- atomic_inc(&vcc->stats->tx_err);
25653+ atomic_inc_unchecked(&vcc->stats->tx_err);
25654 dev_kfree_skb(skb);
25655 return -EINVAL;
58c5fc13 25656 }
25657 if (!test_bit(VCF_TX, &vc->flags)) {
25658 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25659- atomic_inc(&vcc->stats->tx_err);
25660+ atomic_inc_unchecked(&vcc->stats->tx_err);
25661 dev_kfree_skb(skb);
25662 return -EINVAL;
58c5fc13 25663 }
fe2de317 25664@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25665 break;
25666 default:
25667 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25668- atomic_inc(&vcc->stats->tx_err);
25669+ atomic_inc_unchecked(&vcc->stats->tx_err);
25670 dev_kfree_skb(skb);
25671 return -EINVAL;
58c5fc13 25672 }
25673
25674 if (skb_shinfo(skb)->nr_frags != 0) {
25675 printk("%s: No scatter-gather yet.\n", card->name);
25676- atomic_inc(&vcc->stats->tx_err);
25677+ atomic_inc_unchecked(&vcc->stats->tx_err);
25678 dev_kfree_skb(skb);
25679 return -EINVAL;
58c5fc13 25680 }
fe2de317 25681@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25682
25683 err = queue_skb(card, vc, skb, oam);
25684 if (err) {
25685- atomic_inc(&vcc->stats->tx_err);
25686+ atomic_inc_unchecked(&vcc->stats->tx_err);
25687 dev_kfree_skb(skb);
25688 return err;
58c5fc13 25689 }
fe2de317 25690@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
15a11c5b
MT
25691 skb = dev_alloc_skb(64);
25692 if (!skb) {
25693 printk("%s: Out of memory in send_oam().\n", card->name);
25694- atomic_inc(&vcc->stats->tx_err);
25695+ atomic_inc_unchecked(&vcc->stats->tx_err);
25696 return -ENOMEM;
25697 }
25698 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25699diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25700index cb90f7a..bd33566 100644
25701--- a/drivers/atm/iphase.c
25702+++ b/drivers/atm/iphase.c
25703@@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
25704 status = (u_short) (buf_desc_ptr->desc_mode);
25705 if (status & (RX_CER | RX_PTE | RX_OFL))
25706 {
25707- atomic_inc(&vcc->stats->rx_err);
25708+ atomic_inc_unchecked(&vcc->stats->rx_err);
25709 IF_ERR(printk("IA: bad packet, dropping it");)
25710 if (status & RX_CER) {
25711 IF_ERR(printk(" cause: packet CRC error\n");)
fe2de317 25712@@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
25713 len = dma_addr - buf_addr;
25714 if (len > iadev->rx_buf_sz) {
25715 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25716- atomic_inc(&vcc->stats->rx_err);
25717+ atomic_inc_unchecked(&vcc->stats->rx_err);
25718 goto out_free_desc;
25719 }
25720
fe2de317 25721@@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25722 ia_vcc = INPH_IA_VCC(vcc);
25723 if (ia_vcc == NULL)
25724 {
25725- atomic_inc(&vcc->stats->rx_err);
25726+ atomic_inc_unchecked(&vcc->stats->rx_err);
25727 dev_kfree_skb_any(skb);
25728 atm_return(vcc, atm_guess_pdu2truesize(len));
25729 goto INCR_DLE;
fe2de317 25730@@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25731 if ((length > iadev->rx_buf_sz) || (length >
25732 (skb->len - sizeof(struct cpcs_trailer))))
25733 {
25734- atomic_inc(&vcc->stats->rx_err);
25735+ atomic_inc_unchecked(&vcc->stats->rx_err);
25736 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25737 length, skb->len);)
25738 dev_kfree_skb_any(skb);
fe2de317 25739@@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25740
25741 IF_RX(printk("rx_dle_intr: skb push");)
25742 vcc->push(vcc,skb);
25743- atomic_inc(&vcc->stats->rx);
25744+ atomic_inc_unchecked(&vcc->stats->rx);
25745 iadev->rx_pkt_cnt++;
25746 }
25747 INCR_DLE:
fe2de317 25748@@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25749 {
25750 struct k_sonet_stats *stats;
25751 stats = &PRIV(_ia_dev[board])->sonet_stats;
25752- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25753- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25754- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25755- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25756- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25757- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25758- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25759- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25760- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25761+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25762+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25763+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25764+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25765+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25766+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25767+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25768+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25769+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25770 }
25771 ia_cmds.status = 0;
25772 break;
fe2de317 25773@@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25774 if ((desc == 0) || (desc > iadev->num_tx_desc))
25775 {
25776 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25777- atomic_inc(&vcc->stats->tx);
25778+ atomic_inc_unchecked(&vcc->stats->tx);
25779 if (vcc->pop)
25780 vcc->pop(vcc, skb);
25781 else
fe2de317 25782@@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25783 ATM_DESC(skb) = vcc->vci;
25784 skb_queue_tail(&iadev->tx_dma_q, skb);
25785
25786- atomic_inc(&vcc->stats->tx);
25787+ atomic_inc_unchecked(&vcc->stats->tx);
25788 iadev->tx_pkt_cnt++;
25789 /* Increment transaction counter */
25790 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25791
25792 #if 0
25793 /* add flow control logic */
25794- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25795+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25796 if (iavcc->vc_desc_cnt > 10) {
25797 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25798 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25799diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25800index e828c54..ae83976 100644
25801--- a/drivers/atm/lanai.c
25802+++ b/drivers/atm/lanai.c
25803@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25804 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25805 lanai_endtx(lanai, lvcc);
25806 lanai_free_skb(lvcc->tx.atmvcc, skb);
25807- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25808+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25809 }
25810
25811 /* Try to fill the buffer - don't call unless there is backlog */
fe2de317 25812@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25813 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25814 __net_timestamp(skb);
25815 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25816- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25817+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25818 out:
25819 lvcc->rx.buf.ptr = end;
25820 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
fe2de317 25821@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25822 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25823 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25824 lanai->stats.service_rxnotaal5++;
25825- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25826+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25827 return 0;
25828 }
25829 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
fe2de317 25830@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25831 int bytes;
25832 read_unlock(&vcc_sklist_lock);
25833 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25834- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25835+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25836 lvcc->stats.x.aal5.service_trash++;
25837 bytes = (SERVICE_GET_END(s) * 16) -
25838 (((unsigned long) lvcc->rx.buf.ptr) -
fe2de317 25839@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25840 }
25841 if (s & SERVICE_STREAM) {
25842 read_unlock(&vcc_sklist_lock);
25843- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25844+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25845 lvcc->stats.x.aal5.service_stream++;
25846 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25847 "PDU on VCI %d!\n", lanai->number, vci);
fe2de317 25848@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25849 return 0;
25850 }
25851 DPRINTK("got rx crc error on vci %d\n", vci);
25852- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25853+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25854 lvcc->stats.x.aal5.service_rxcrc++;
25855 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25856 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25857diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25858index 1c70c45..300718d 100644
25859--- a/drivers/atm/nicstar.c
25860+++ b/drivers/atm/nicstar.c
25861@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
25862 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25863 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25864 card->index);
25865- atomic_inc(&vcc->stats->tx_err);
25866+ atomic_inc_unchecked(&vcc->stats->tx_err);
25867 dev_kfree_skb_any(skb);
25868 return -EINVAL;
25869 }
fe2de317 25870@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
25871 if (!vc->tx) {
25872 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25873 card->index);
25874- atomic_inc(&vcc->stats->tx_err);
25875+ atomic_inc_unchecked(&vcc->stats->tx_err);
25876 dev_kfree_skb_any(skb);
25877 return -EINVAL;
25878 }
fe2de317 25879@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
25880 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25881 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25882 card->index);
25883- atomic_inc(&vcc->stats->tx_err);
25884+ atomic_inc_unchecked(&vcc->stats->tx_err);
25885 dev_kfree_skb_any(skb);
25886 return -EINVAL;
25887 }
58c5fc13 25888
6892158b
MT
25889 if (skb_shinfo(skb)->nr_frags != 0) {
25890 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25891- atomic_inc(&vcc->stats->tx_err);
25892+ atomic_inc_unchecked(&vcc->stats->tx_err);
25893 dev_kfree_skb_any(skb);
25894 return -EINVAL;
25895 }
fe2de317 25896@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
25897 }
25898
25899 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25900- atomic_inc(&vcc->stats->tx_err);
25901+ atomic_inc_unchecked(&vcc->stats->tx_err);
25902 dev_kfree_skb_any(skb);
25903 return -EIO;
25904 }
25905- atomic_inc(&vcc->stats->tx);
25906+ atomic_inc_unchecked(&vcc->stats->tx);
25907
25908 return 0;
25909 }
fe2de317 25910@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
25911 printk
25912 ("nicstar%d: Can't allocate buffers for aal0.\n",
25913 card->index);
25914- atomic_add(i, &vcc->stats->rx_drop);
25915+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25916 break;
25917 }
25918 if (!atm_charge(vcc, sb->truesize)) {
25919 RXPRINTK
25920 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25921 card->index);
25922- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25923+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25924 dev_kfree_skb_any(sb);
25925 break;
25926 }
fe2de317 25927@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
25928 ATM_SKB(sb)->vcc = vcc;
25929 __net_timestamp(sb);
25930 vcc->push(vcc, sb);
25931- atomic_inc(&vcc->stats->rx);
25932+ atomic_inc_unchecked(&vcc->stats->rx);
25933 cell += ATM_CELL_PAYLOAD;
25934 }
25935
fe2de317 25936@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
25937 if (iovb == NULL) {
25938 printk("nicstar%d: Out of iovec buffers.\n",
25939 card->index);
25940- atomic_inc(&vcc->stats->rx_drop);
25941+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25942 recycle_rx_buf(card, skb);
25943 return;
25944 }
fe2de317 25945@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
25946 small or large buffer itself. */
25947 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
25948 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25949- atomic_inc(&vcc->stats->rx_err);
25950+ atomic_inc_unchecked(&vcc->stats->rx_err);
25951 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25952 NS_MAX_IOVECS);
25953 NS_PRV_IOVCNT(iovb) = 0;
fe2de317 25954@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
25955 ("nicstar%d: Expected a small buffer, and this is not one.\n",
25956 card->index);
25957 which_list(card, skb);
25958- atomic_inc(&vcc->stats->rx_err);
25959+ atomic_inc_unchecked(&vcc->stats->rx_err);
25960 recycle_rx_buf(card, skb);
25961 vc->rx_iov = NULL;
25962 recycle_iov_buf(card, iovb);
fe2de317 25963@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
25964 ("nicstar%d: Expected a large buffer, and this is not one.\n",
25965 card->index);
25966 which_list(card, skb);
25967- atomic_inc(&vcc->stats->rx_err);
25968+ atomic_inc_unchecked(&vcc->stats->rx_err);
25969 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25970 NS_PRV_IOVCNT(iovb));
25971 vc->rx_iov = NULL;
fe2de317 25972@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
25973 printk(" - PDU size mismatch.\n");
25974 else
25975 printk(".\n");
25976- atomic_inc(&vcc->stats->rx_err);
25977+ atomic_inc_unchecked(&vcc->stats->rx_err);
25978 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25979 NS_PRV_IOVCNT(iovb));
25980 vc->rx_iov = NULL;
fe2de317 25981@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
25982 /* skb points to a small buffer */
25983 if (!atm_charge(vcc, skb->truesize)) {
25984 push_rxbufs(card, skb);
25985- atomic_inc(&vcc->stats->rx_drop);
25986+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25987 } else {
25988 skb_put(skb, len);
25989 dequeue_sm_buf(card, skb);
fe2de317 25990@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
25991 ATM_SKB(skb)->vcc = vcc;
25992 __net_timestamp(skb);
25993 vcc->push(vcc, skb);
25994- atomic_inc(&vcc->stats->rx);
25995+ atomic_inc_unchecked(&vcc->stats->rx);
25996 }
25997 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
25998 struct sk_buff *sb;
fe2de317 25999@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
26000 if (len <= NS_SMBUFSIZE) {
26001 if (!atm_charge(vcc, sb->truesize)) {
26002 push_rxbufs(card, sb);
26003- atomic_inc(&vcc->stats->rx_drop);
26004+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26005 } else {
26006 skb_put(sb, len);
26007 dequeue_sm_buf(card, sb);
fe2de317 26008@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
26009 ATM_SKB(sb)->vcc = vcc;
26010 __net_timestamp(sb);
26011 vcc->push(vcc, sb);
26012- atomic_inc(&vcc->stats->rx);
26013+ atomic_inc_unchecked(&vcc->stats->rx);
26014 }
26015
26016 push_rxbufs(card, skb);
fe2de317 26017@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
26018
26019 if (!atm_charge(vcc, skb->truesize)) {
26020 push_rxbufs(card, skb);
26021- atomic_inc(&vcc->stats->rx_drop);
26022+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26023 } else {
26024 dequeue_lg_buf(card, skb);
26025 #ifdef NS_USE_DESTRUCTORS
fe2de317 26026@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
26027 ATM_SKB(skb)->vcc = vcc;
26028 __net_timestamp(skb);
26029 vcc->push(vcc, skb);
26030- atomic_inc(&vcc->stats->rx);
26031+ atomic_inc_unchecked(&vcc->stats->rx);
26032 }
26033
26034 push_rxbufs(card, sb);
fe2de317 26035@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
26036 printk
26037 ("nicstar%d: Out of huge buffers.\n",
26038 card->index);
26039- atomic_inc(&vcc->stats->rx_drop);
26040+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26041 recycle_iovec_rx_bufs(card,
26042 (struct iovec *)
26043 iovb->data,
fe2de317 26044@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
26045 card->hbpool.count++;
26046 } else
26047 dev_kfree_skb_any(hb);
26048- atomic_inc(&vcc->stats->rx_drop);
26049+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26050 } else {
26051 /* Copy the small buffer to the huge buffer */
26052 sb = (struct sk_buff *)iov->iov_base;
fe2de317 26053@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
58c5fc13 26054 #endif /* NS_USE_DESTRUCTORS */
6892158b
MT
26055 __net_timestamp(hb);
26056 vcc->push(vcc, hb);
26057- atomic_inc(&vcc->stats->rx);
26058+ atomic_inc_unchecked(&vcc->stats->rx);
26059 }
26060 }
58c5fc13 26061
fe2de317
MT
26062diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26063index 5d1d076..4f31f42 100644
26064--- a/drivers/atm/solos-pci.c
26065+++ b/drivers/atm/solos-pci.c
15a11c5b 26066@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
58c5fc13
MT
26067 }
26068 atm_charge(vcc, skb->truesize);
26069 vcc->push(vcc, skb);
26070- atomic_inc(&vcc->stats->rx);
26071+ atomic_inc_unchecked(&vcc->stats->rx);
26072 break;
26073
26074 case PKT_STATUS:
fe2de317 26075@@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *buf)
66a7e928
MT
26076 char msg[500];
26077 char item[10];
26078
26079+ pax_track_stack();
26080+
26081 len = buf->len;
26082 for (i = 0; i < len; i++){
26083 if(i % 8 == 0)
fe2de317 26084@@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
58c5fc13
MT
26085 vcc = SKB_CB(oldskb)->vcc;
26086
26087 if (vcc) {
26088- atomic_inc(&vcc->stats->tx);
26089+ atomic_inc_unchecked(&vcc->stats->tx);
26090 solos_pop(vcc, oldskb);
26091 } else
26092 dev_kfree_skb_irq(oldskb);
fe2de317
MT
26093diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26094index 90f1ccc..04c4a1e 100644
26095--- a/drivers/atm/suni.c
26096+++ b/drivers/atm/suni.c
df50ba0c 26097@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
58c5fc13
MT
26098
26099
26100 #define ADD_LIMITED(s,v) \
26101- atomic_add((v),&stats->s); \
26102- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26103+ atomic_add_unchecked((v),&stats->s); \
26104+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26105
26106
26107 static void suni_hz(unsigned long from_timer)
fe2de317
MT
26108diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26109index 5120a96..e2572bd 100644
26110--- a/drivers/atm/uPD98402.c
26111+++ b/drivers/atm/uPD98402.c
26112@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
58c5fc13
MT
26113 struct sonet_stats tmp;
26114 int error = 0;
26115
26116- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26117+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26118 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26119 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26120 if (zero && !error) {
fe2de317 26121@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
58c5fc13
MT
26122
26123
26124 #define ADD_LIMITED(s,v) \
26125- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26126- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26127- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26128+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26129+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26130+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26131
26132
26133 static void stat_event(struct atm_dev *dev)
fe2de317 26134@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
58c5fc13
MT
26135 if (reason & uPD98402_INT_PFM) stat_event(dev);
26136 if (reason & uPD98402_INT_PCO) {
26137 (void) GET(PCOCR); /* clear interrupt cause */
26138- atomic_add(GET(HECCT),
26139+ atomic_add_unchecked(GET(HECCT),
26140 &PRIV(dev)->sonet_stats.uncorr_hcs);
26141 }
26142 if ((reason & uPD98402_INT_RFO) &&
fe2de317 26143@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
58c5fc13
MT
26144 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26145 uPD98402_INT_LOS),PIMR); /* enable them */
26146 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26147- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26148- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26149- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26150+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26151+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26152+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26153 return 0;
26154 }
26155
fe2de317
MT
26156diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26157index d889f56..17eb71e 100644
26158--- a/drivers/atm/zatm.c
26159+++ b/drivers/atm/zatm.c
26160@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
58c5fc13
MT
26161 }
26162 if (!size) {
26163 dev_kfree_skb_irq(skb);
26164- if (vcc) atomic_inc(&vcc->stats->rx_err);
26165+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26166 continue;
26167 }
26168 if (!atm_charge(vcc,skb->truesize)) {
fe2de317 26169@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
58c5fc13
MT
26170 skb->len = size;
26171 ATM_SKB(skb)->vcc = vcc;
26172 vcc->push(vcc,skb);
26173- atomic_inc(&vcc->stats->rx);
26174+ atomic_inc_unchecked(&vcc->stats->rx);
26175 }
26176 zout(pos & 0xffff,MTA(mbx));
26177 #if 0 /* probably a stupid idea */
fe2de317 26178@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
58c5fc13
MT
26179 skb_queue_head(&zatm_vcc->backlog,skb);
26180 break;
26181 }
26182- atomic_inc(&vcc->stats->tx);
26183+ atomic_inc_unchecked(&vcc->stats->tx);
26184 wake_up(&zatm_vcc->tx_wait);
26185 }
26186
fe2de317
MT
26187diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26188index a4760e0..51283cf 100644
26189--- a/drivers/base/devtmpfs.c
26190+++ b/drivers/base/devtmpfs.c
6e9df6a3
MT
26191@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26192 if (!thread)
26193 return 0;
26194
26195- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26196+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26197 if (err)
26198 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26199 else
fe2de317
MT
26200diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26201index 84f7c7d..37cfd87 100644
26202--- a/drivers/base/power/wakeup.c
26203+++ b/drivers/base/power/wakeup.c
66a7e928
MT
26204@@ -29,14 +29,14 @@ bool events_check_enabled;
26205 * They need to be modified together atomically, so it's better to use one
26206 * atomic variable to hold them both.
26207 */
26208-static atomic_t combined_event_count = ATOMIC_INIT(0);
26209+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
8308f9c9 26210
66a7e928
MT
26211 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26212 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26213
26214 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26215 {
26216- unsigned int comb = atomic_read(&combined_event_count);
26217+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
26218
26219 *cnt = (comb >> IN_PROGRESS_BITS);
26220 *inpr = comb & MAX_IN_PROGRESS;
fe2de317 26221@@ -350,7 +350,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
66a7e928
MT
26222 ws->last_time = ktime_get();
26223
26224 /* Increment the counter of events in progress. */
26225- atomic_inc(&combined_event_count);
26226+ atomic_inc_unchecked(&combined_event_count);
8308f9c9
MT
26227 }
26228
66a7e928 26229 /**
fe2de317 26230@@ -440,7 +440,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
66a7e928
MT
26231 * Increment the counter of registered wakeup events and decrement the
26232 * couter of wakeup events in progress simultaneously.
26233 */
26234- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26235+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26236 }
8308f9c9 26237
66a7e928 26238 /**
fe2de317
MT
26239diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
26240index e086fbb..398e1fe 100644
26241--- a/drivers/block/DAC960.c
26242+++ b/drivers/block/DAC960.c
26243@@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
26244 unsigned long flags;
26245 int Channel, TargetID;
26246
26247+ pax_track_stack();
26248+
26249 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26250 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26251 sizeof(DAC960_SCSI_Inquiry_T) +
26252diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26253index c2f9b3e..5911988 100644
26254--- a/drivers/block/cciss.c
26255+++ b/drivers/block/cciss.c
26256@@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
bc901d79
MT
26257 int err;
26258 u32 cp;
26259
26260+ memset(&arg64, 0, sizeof(arg64));
26261+
26262 err = 0;
26263 err |=
26264 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
15a11c5b
MT
26265@@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
26266 while (!list_empty(&h->reqQ)) {
26267 c = list_entry(h->reqQ.next, CommandList_struct, list);
26268 /* can't do anything if fifo is full */
26269- if ((h->access.fifo_full(h))) {
26270+ if ((h->access->fifo_full(h))) {
26271 dev_warn(&h->pdev->dev, "fifo full\n");
26272 break;
26273 }
26274@@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
26275 h->Qdepth--;
66a7e928 26276
15a11c5b
MT
26277 /* Tell the controller execute command */
26278- h->access.submit_command(h, c);
26279+ h->access->submit_command(h, c);
66a7e928 26280
15a11c5b
MT
26281 /* Put job onto the completed Q */
26282 addQ(&h->cmpQ, c);
26283@@ -3422,17 +3424,17 @@ startio:
66a7e928 26284
15a11c5b
MT
26285 static inline unsigned long get_next_completion(ctlr_info_t *h)
26286 {
26287- return h->access.command_completed(h);
26288+ return h->access->command_completed(h);
26289 }
26290
26291 static inline int interrupt_pending(ctlr_info_t *h)
26292 {
26293- return h->access.intr_pending(h);
26294+ return h->access->intr_pending(h);
26295 }
26296
26297 static inline long interrupt_not_for_us(ctlr_info_t *h)
26298 {
26299- return ((h->access.intr_pending(h) == 0) ||
26300+ return ((h->access->intr_pending(h) == 0) ||
26301 (h->interrupts_enabled == 0));
26302 }
26303
fe2de317 26304@@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info_t *h)
15a11c5b
MT
26305 u32 a;
26306
26307 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26308- return h->access.command_completed(h);
26309+ return h->access->command_completed(h);
26310
26311 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26312 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
fe2de317 26313@@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
15a11c5b
MT
26314 trans_support & CFGTBL_Trans_use_short_tags);
26315
26316 /* Change the access methods to the performant access methods */
26317- h->access = SA5_performant_access;
26318+ h->access = &SA5_performant_access;
26319 h->transMethod = CFGTBL_Trans_Performant;
26320
26321 return;
fe2de317 26322@@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
15a11c5b
MT
26323 if (prod_index < 0)
26324 return -ENODEV;
26325 h->product_name = products[prod_index].product_name;
26326- h->access = *(products[prod_index].access);
26327+ h->access = products[prod_index].access;
26328
26329 if (cciss_board_disabled(h)) {
26330 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
6e9df6a3 26331@@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
15a11c5b
MT
26332 }
26333
26334 /* make sure the board interrupts are off */
26335- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26336+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26337 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26338 if (rc)
26339 goto clean2;
6e9df6a3 26340@@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
15a11c5b
MT
26341 * fake ones to scoop up any residual completions.
26342 */
26343 spin_lock_irqsave(&h->lock, flags);
26344- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26345+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26346 spin_unlock_irqrestore(&h->lock, flags);
26347 free_irq(h->intr[PERF_MODE_INT], h);
26348 rc = cciss_request_irq(h, cciss_msix_discard_completions,
6e9df6a3 26349@@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
15a11c5b
MT
26350 dev_info(&h->pdev->dev, "Board READY.\n");
26351 dev_info(&h->pdev->dev,
26352 "Waiting for stale completions to drain.\n");
26353- h->access.set_intr_mask(h, CCISS_INTR_ON);
26354+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26355 msleep(10000);
26356- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26357+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26358
26359 rc = controller_reset_failed(h->cfgtable);
26360 if (rc)
6e9df6a3 26361@@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
15a11c5b
MT
26362 cciss_scsi_setup(h);
26363
26364 /* Turn the interrupts on so we can service requests */
26365- h->access.set_intr_mask(h, CCISS_INTR_ON);
26366+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26367
26368 /* Get the firmware version */
26369 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
fe2de317 26370@@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
15a11c5b
MT
26371 kfree(flush_buf);
26372 if (return_code != IO_OK)
26373 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26374- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26375+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26376 free_irq(h->intr[PERF_MODE_INT], h);
26377 }
26378
fe2de317
MT
26379diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26380index c049548..a09cb6e 100644
26381--- a/drivers/block/cciss.h
26382+++ b/drivers/block/cciss.h
15a11c5b
MT
26383@@ -100,7 +100,7 @@ struct ctlr_info
26384 /* information about each logical volume */
26385 drive_info_struct *drv[CISS_MAX_LUN];
26386
26387- struct access_method access;
26388+ struct access_method *access;
26389
26390 /* queue and queue Info */
26391 struct list_head reqQ;
fe2de317
MT
26392diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26393index b2fceb5..87fec83 100644
26394--- a/drivers/block/cpqarray.c
26395+++ b/drivers/block/cpqarray.c
26396@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
15a11c5b
MT
26397 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26398 goto Enomem4;
26399 }
26400- hba[i]->access.set_intr_mask(hba[i], 0);
26401+ hba[i]->access->set_intr_mask(hba[i], 0);
26402 if (request_irq(hba[i]->intr, do_ida_intr,
26403 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26404 {
fe2de317 26405@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
15a11c5b
MT
26406 add_timer(&hba[i]->timer);
26407
26408 /* Enable IRQ now that spinlock and rate limit timer are set up */
26409- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26410+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26411
26412 for(j=0; j<NWD; j++) {
26413 struct gendisk *disk = ida_gendisk[i][j];
26414@@ -694,7 +694,7 @@ DBGINFO(
26415 for(i=0; i<NR_PRODUCTS; i++) {
26416 if (board_id == products[i].board_id) {
26417 c->product_name = products[i].product_name;
26418- c->access = *(products[i].access);
26419+ c->access = products[i].access;
26420 break;
26421 }
26422 }
fe2de317 26423@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
15a11c5b
MT
26424 hba[ctlr]->intr = intr;
26425 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26426 hba[ctlr]->product_name = products[j].product_name;
26427- hba[ctlr]->access = *(products[j].access);
26428+ hba[ctlr]->access = products[j].access;
26429 hba[ctlr]->ctlr = ctlr;
26430 hba[ctlr]->board_id = board_id;
26431 hba[ctlr]->pci_dev = NULL; /* not PCI */
fe2de317 26432@@ -911,6 +911,8 @@ static void do_ida_request(struct request_queue *q)
66a7e928
MT
26433 struct scatterlist tmp_sg[SG_MAX];
26434 int i, dir, seg;
26435
26436+ pax_track_stack();
26437+
26438 queue_next:
26439 creq = blk_peek_request(q);
26440 if (!creq)
15a11c5b
MT
26441@@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
26442
26443 while((c = h->reqQ) != NULL) {
26444 /* Can't do anything if we're busy */
26445- if (h->access.fifo_full(h) == 0)
26446+ if (h->access->fifo_full(h) == 0)
26447 return;
66a7e928 26448
15a11c5b
MT
26449 /* Get the first entry from the request Q */
26450@@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
26451 h->Qdepth--;
26452
26453 /* Tell the controller to do our bidding */
26454- h->access.submit_command(h, c);
26455+ h->access->submit_command(h, c);
26456
26457 /* Get onto the completion Q */
26458 addQ(&h->cmpQ, c);
fe2de317 26459@@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
15a11c5b
MT
26460 unsigned long flags;
26461 __u32 a,a1;
26462
26463- istat = h->access.intr_pending(h);
26464+ istat = h->access->intr_pending(h);
26465 /* Is this interrupt for us? */
26466 if (istat == 0)
26467 return IRQ_NONE;
fe2de317 26468@@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
15a11c5b
MT
26469 */
26470 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26471 if (istat & FIFO_NOT_EMPTY) {
26472- while((a = h->access.command_completed(h))) {
26473+ while((a = h->access->command_completed(h))) {
26474 a1 = a; a &= ~3;
26475 if ((c = h->cmpQ) == NULL)
26476 {
26477@@ -1449,11 +1451,11 @@ static int sendcmd(
26478 /*
26479 * Disable interrupt
26480 */
26481- info_p->access.set_intr_mask(info_p, 0);
26482+ info_p->access->set_intr_mask(info_p, 0);
26483 /* Make sure there is room in the command FIFO */
26484 /* Actually it should be completely empty at this time. */
26485 for (i = 200000; i > 0; i--) {
26486- temp = info_p->access.fifo_full(info_p);
26487+ temp = info_p->access->fifo_full(info_p);
26488 if (temp != 0) {
26489 break;
26490 }
26491@@ -1466,7 +1468,7 @@ DBG(
26492 /*
26493 * Send the cmd
26494 */
26495- info_p->access.submit_command(info_p, c);
26496+ info_p->access->submit_command(info_p, c);
26497 complete = pollcomplete(ctlr);
26498
26499 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
fe2de317 26500@@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t *host)
15a11c5b
MT
26501 * we check the new geometry. Then turn interrupts back on when
26502 * we're done.
26503 */
26504- host->access.set_intr_mask(host, 0);
26505+ host->access->set_intr_mask(host, 0);
26506 getgeometry(ctlr);
26507- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26508+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26509
26510 for(i=0; i<NWD; i++) {
26511 struct gendisk *disk = ida_gendisk[ctlr][i];
26512@@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
26513 /* Wait (up to 2 seconds) for a command to complete */
26514
26515 for (i = 200000; i > 0; i--) {
26516- done = hba[ctlr]->access.command_completed(hba[ctlr]);
26517+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
26518 if (done == 0) {
26519 udelay(10); /* a short fixed delay */
26520 } else
fe2de317
MT
26521diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26522index be73e9d..7fbf140 100644
26523--- a/drivers/block/cpqarray.h
26524+++ b/drivers/block/cpqarray.h
15a11c5b
MT
26525@@ -99,7 +99,7 @@ struct ctlr_info {
26526 drv_info_t drv[NWD];
26527 struct proc_dir_entry *proc;
26528
26529- struct access_method access;
26530+ struct access_method *access;
26531
26532 cmdlist_t *reqQ;
26533 cmdlist_t *cmpQ;
fe2de317
MT
26534diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26535index ef2ceed..c9cb18e 100644
26536--- a/drivers/block/drbd/drbd_int.h
26537+++ b/drivers/block/drbd/drbd_int.h
15a11c5b 26538@@ -737,7 +737,7 @@ struct drbd_request;
8308f9c9
MT
26539 struct drbd_epoch {
26540 struct list_head list;
26541 unsigned int barrier_nr;
26542- atomic_t epoch_size; /* increased on every request added. */
26543+ atomic_unchecked_t epoch_size; /* increased on every request added. */
26544 atomic_t active; /* increased on every req. added, and dec on every finished. */
26545 unsigned long flags;
26546 };
15a11c5b 26547@@ -1109,7 +1109,7 @@ struct drbd_conf {
8308f9c9
MT
26548 void *int_dig_in;
26549 void *int_dig_vv;
26550 wait_queue_head_t seq_wait;
26551- atomic_t packet_seq;
26552+ atomic_unchecked_t packet_seq;
26553 unsigned int peer_seq;
26554 spinlock_t peer_seq_lock;
26555 unsigned int minor;
fe2de317 26556@@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
6e9df6a3
MT
26557
26558 static inline void drbd_tcp_cork(struct socket *sock)
26559 {
26560- int __user val = 1;
26561+ int val = 1;
26562 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26563- (char __user *)&val, sizeof(val));
26564+ (char __force_user *)&val, sizeof(val));
26565 }
26566
26567 static inline void drbd_tcp_uncork(struct socket *sock)
26568 {
26569- int __user val = 0;
26570+ int val = 0;
26571 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26572- (char __user *)&val, sizeof(val));
26573+ (char __force_user *)&val, sizeof(val));
26574 }
26575
26576 static inline void drbd_tcp_nodelay(struct socket *sock)
26577 {
26578- int __user val = 1;
26579+ int val = 1;
26580 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26581- (char __user *)&val, sizeof(val));
26582+ (char __force_user *)&val, sizeof(val));
26583 }
26584
26585 static inline void drbd_tcp_quickack(struct socket *sock)
26586 {
26587- int __user val = 2;
26588+ int val = 2;
26589 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26590- (char __user *)&val, sizeof(val));
26591+ (char __force_user *)&val, sizeof(val));
26592 }
26593
26594 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
fe2de317
MT
26595diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26596index 0358e55..bc33689 100644
26597--- a/drivers/block/drbd/drbd_main.c
26598+++ b/drivers/block/drbd/drbd_main.c
26599@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
8308f9c9
MT
26600 p.sector = sector;
26601 p.block_id = block_id;
26602 p.blksize = blksize;
26603- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26604+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26605
26606 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
66a7e928 26607 return false;
fe2de317 26608@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
8308f9c9
MT
26609 p.sector = cpu_to_be64(req->sector);
26610 p.block_id = (unsigned long)req;
26611 p.seq_num = cpu_to_be32(req->seq_num =
26612- atomic_add_return(1, &mdev->packet_seq));
26613+ atomic_add_return_unchecked(1, &mdev->packet_seq));
26614
26615 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26616
fe2de317 26617@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
8308f9c9
MT
26618 atomic_set(&mdev->unacked_cnt, 0);
26619 atomic_set(&mdev->local_cnt, 0);
26620 atomic_set(&mdev->net_cnt, 0);
26621- atomic_set(&mdev->packet_seq, 0);
26622+ atomic_set_unchecked(&mdev->packet_seq, 0);
26623 atomic_set(&mdev->pp_in_use, 0);
26624 atomic_set(&mdev->pp_in_use_by_net, 0);
26625 atomic_set(&mdev->rs_sect_in, 0);
fe2de317 26626@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
8308f9c9
MT
26627 mdev->receiver.t_state);
26628
26629 /* no need to lock it, I'm the only thread alive */
26630- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26631- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26632+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26633+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26634 mdev->al_writ_cnt =
26635 mdev->bm_writ_cnt =
26636 mdev->read_cnt =
fe2de317
MT
26637diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26638index 0feab26..5d9b3dd 100644
26639--- a/drivers/block/drbd/drbd_nl.c
26640+++ b/drivers/block/drbd/drbd_nl.c
26641@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
8308f9c9
MT
26642 module_put(THIS_MODULE);
26643 }
26644
26645-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26646+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26647
26648 static unsigned short *
26649 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
fe2de317 26650@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
8308f9c9
MT
26651 cn_reply->id.idx = CN_IDX_DRBD;
26652 cn_reply->id.val = CN_VAL_DRBD;
26653
26654- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26655+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26656 cn_reply->ack = 0; /* not used here. */
26657 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26658 (int)((char *)tl - (char *)reply->tag_list);
fe2de317 26659@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
8308f9c9
MT
26660 cn_reply->id.idx = CN_IDX_DRBD;
26661 cn_reply->id.val = CN_VAL_DRBD;
26662
26663- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26664+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26665 cn_reply->ack = 0; /* not used here. */
26666 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26667 (int)((char *)tl - (char *)reply->tag_list);
fe2de317 26668@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
8308f9c9
MT
26669 cn_reply->id.idx = CN_IDX_DRBD;
26670 cn_reply->id.val = CN_VAL_DRBD;
26671
26672- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26673+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26674 cn_reply->ack = 0; // not used here.
26675 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26676 (int)((char*)tl - (char*)reply->tag_list);
fe2de317 26677@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
8308f9c9
MT
26678 cn_reply->id.idx = CN_IDX_DRBD;
26679 cn_reply->id.val = CN_VAL_DRBD;
26680
26681- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26682+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26683 cn_reply->ack = 0; /* not used here. */
26684 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26685 (int)((char *)tl - (char *)reply->tag_list);
fe2de317
MT
26686diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26687index 43beaca..4a5b1dd 100644
26688--- a/drivers/block/drbd/drbd_receiver.c
26689+++ b/drivers/block/drbd/drbd_receiver.c
66a7e928 26690@@ -894,7 +894,7 @@ retry:
8308f9c9
MT
26691 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26692 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26693
26694- atomic_set(&mdev->packet_seq, 0);
26695+ atomic_set_unchecked(&mdev->packet_seq, 0);
26696 mdev->peer_seq = 0;
26697
26698 drbd_thread_start(&mdev->asender);
fe2de317 26699@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
8308f9c9
MT
26700 do {
26701 next_epoch = NULL;
26702
26703- epoch_size = atomic_read(&epoch->epoch_size);
26704+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26705
26706 switch (ev & ~EV_CLEANUP) {
26707 case EV_PUT:
fe2de317 26708@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
8308f9c9
MT
26709 rv = FE_DESTROYED;
26710 } else {
26711 epoch->flags = 0;
26712- atomic_set(&epoch->epoch_size, 0);
26713+ atomic_set_unchecked(&epoch->epoch_size, 0);
26714 /* atomic_set(&epoch->active, 0); is already zero */
26715 if (rv == FE_STILL_LIVE)
26716 rv = FE_RECYCLED;
fe2de317 26717@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
8308f9c9
MT
26718 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26719 drbd_flush(mdev);
26720
26721- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26722+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26723 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26724 if (epoch)
26725 break;
26726 }
26727
26728 epoch = mdev->current_epoch;
26729- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26730+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26731
26732 D_ASSERT(atomic_read(&epoch->active) == 0);
26733 D_ASSERT(epoch->flags == 0);
fe2de317 26734@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
8308f9c9
MT
26735 }
26736
26737 epoch->flags = 0;
26738- atomic_set(&epoch->epoch_size, 0);
26739+ atomic_set_unchecked(&epoch->epoch_size, 0);
26740 atomic_set(&epoch->active, 0);
26741
26742 spin_lock(&mdev->epoch_lock);
26743- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26744+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26745 list_add(&epoch->list, &mdev->current_epoch->list);
26746 mdev->current_epoch = epoch;
26747 mdev->epochs++;
fe2de317 26748@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
8308f9c9
MT
26749 spin_unlock(&mdev->peer_seq_lock);
26750
26751 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26752- atomic_inc(&mdev->current_epoch->epoch_size);
26753+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26754 return drbd_drain_block(mdev, data_size);
26755 }
26756
fe2de317 26757@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
8308f9c9
MT
26758
26759 spin_lock(&mdev->epoch_lock);
26760 e->epoch = mdev->current_epoch;
26761- atomic_inc(&e->epoch->epoch_size);
26762+ atomic_inc_unchecked(&e->epoch->epoch_size);
26763 atomic_inc(&e->epoch->active);
26764 spin_unlock(&mdev->epoch_lock);
26765
fe2de317 26766@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
8308f9c9
MT
26767 D_ASSERT(list_empty(&mdev->done_ee));
26768
26769 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26770- atomic_set(&mdev->current_epoch->epoch_size, 0);
26771+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26772 D_ASSERT(list_empty(&mdev->current_epoch->list));
26773 }
26774
fe2de317
MT
26775diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26776index 4720c7a..2c49af1 100644
26777--- a/drivers/block/loop.c
26778+++ b/drivers/block/loop.c
26779@@ -283,7 +283,7 @@ static int __do_lo_send_write(struct file *file,
6e9df6a3
MT
26780 mm_segment_t old_fs = get_fs();
26781
26782 set_fs(get_ds());
26783- bw = file->f_op->write(file, buf, len, &pos);
26784+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26785 set_fs(old_fs);
26786 if (likely(bw == len))
26787 return 0;
fe2de317
MT
26788diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
26789index f533f33..6177bcb 100644
26790--- a/drivers/block/nbd.c
26791+++ b/drivers/block/nbd.c
26792@@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
66a7e928
MT
26793 struct kvec iov;
26794 sigset_t blocked, oldset;
26795
26796+ pax_track_stack();
26797+
26798 if (unlikely(!sock)) {
26799 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26800 lo->disk->disk_name, (send ? "send" : "recv"));
fe2de317 26801@@ -572,6 +574,8 @@ static void do_nbd_request(struct request_queue *q)
66a7e928
MT
26802 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26803 unsigned int cmd, unsigned long arg)
26804 {
26805+ pax_track_stack();
26806+
26807 switch (cmd) {
26808 case NBD_DISCONNECT: {
26809 struct request sreq;
fe2de317
MT
26810diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26811index 423fd56..06d3be0 100644
26812--- a/drivers/char/Kconfig
26813+++ b/drivers/char/Kconfig
26814@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26815
26816 config DEVKMEM
26817 bool "/dev/kmem virtual device support"
26818- default y
26819+ default n
26820+ depends on !GRKERNSEC_KMEM
26821 help
26822 Say Y here if you want to support the /dev/kmem device. The
26823 /dev/kmem device is rarely used, but can be used for certain
26824@@ -596,6 +597,7 @@ config DEVPORT
26825 bool
26826 depends on !M68K
26827 depends on ISA || PCI
26828+ depends on !GRKERNSEC_KMEM
26829 default y
26830
26831 source "drivers/s390/char/Kconfig"
26832diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26833index 2e04433..22afc64 100644
26834--- a/drivers/char/agp/frontend.c
26835+++ b/drivers/char/agp/frontend.c
26836@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
58c5fc13
MT
26837 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26838 return -EFAULT;
26839
26840- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26841+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26842 return -EFAULT;
26843
26844 client = agp_find_client_by_pid(reserve.pid);
fe2de317
MT
26845diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26846index 095ab90..afad0a4 100644
26847--- a/drivers/char/briq_panel.c
26848+++ b/drivers/char/briq_panel.c
71d190be
MT
26849@@ -9,6 +9,7 @@
26850 #include <linux/types.h>
26851 #include <linux/errno.h>
26852 #include <linux/tty.h>
26853+#include <linux/mutex.h>
26854 #include <linux/timer.h>
26855 #include <linux/kernel.h>
26856 #include <linux/wait.h>
26857@@ -34,6 +35,7 @@ static int vfd_is_open;
26858 static unsigned char vfd[40];
26859 static int vfd_cursor;
26860 static unsigned char ledpb, led;
26861+static DEFINE_MUTEX(vfd_mutex);
26862
26863 static void update_vfd(void)
26864 {
fe2de317 26865@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
71d190be
MT
26866 if (!vfd_is_open)
26867 return -EBUSY;
26868
26869+ mutex_lock(&vfd_mutex);
26870 for (;;) {
26871 char c;
26872 if (!indx)
26873 break;
26874- if (get_user(c, buf))
26875+ if (get_user(c, buf)) {
26876+ mutex_unlock(&vfd_mutex);
26877 return -EFAULT;
26878+ }
26879 if (esc) {
26880 set_led(c);
26881 esc = 0;
fe2de317 26882@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
71d190be
MT
26883 buf++;
26884 }
26885 update_vfd();
26886+ mutex_unlock(&vfd_mutex);
26887
26888 return len;
26889 }
fe2de317
MT
26890diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26891index f773a9d..65cd683 100644
26892--- a/drivers/char/genrtc.c
26893+++ b/drivers/char/genrtc.c
26894@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
71d190be
MT
26895 switch (cmd) {
26896
26897 case RTC_PLL_GET:
26898+ memset(&pll, 0, sizeof(pll));
26899 if (get_rtc_pll(&pll))
26900 return -EINVAL;
26901 else
fe2de317
MT
26902diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26903index 0833896..cccce52 100644
26904--- a/drivers/char/hpet.c
26905+++ b/drivers/char/hpet.c
26906@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
ae4e228f
MT
26907 }
26908
df50ba0c 26909 static int
bc901d79
MT
26910-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26911+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26912 struct hpet_info *info)
ae4e228f 26913 {
df50ba0c 26914 struct hpet_timer __iomem *timer;
fe2de317
MT
26915diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26916index 58c0e63..25aed94 100644
26917--- a/drivers/char/ipmi/ipmi_msghandler.c
26918+++ b/drivers/char/ipmi/ipmi_msghandler.c
15a11c5b 26919@@ -415,7 +415,7 @@ struct ipmi_smi {
58c5fc13
MT
26920 struct proc_dir_entry *proc_dir;
26921 char proc_dir_name[10];
26922
26923- atomic_t stats[IPMI_NUM_STATS];
26924+ atomic_unchecked_t stats[IPMI_NUM_STATS];
26925
26926 /*
26927 * run_to_completion duplicate of smb_info, smi_info
15a11c5b 26928@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
58c5fc13
MT
26929
26930
26931 #define ipmi_inc_stat(intf, stat) \
26932- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26933+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26934 #define ipmi_get_stat(intf, stat) \
26935- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26936+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26937
26938 static int is_lan_addr(struct ipmi_addr *addr)
26939 {
fe2de317 26940@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
58c5fc13
MT
26941 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26942 init_waitqueue_head(&intf->waitq);
26943 for (i = 0; i < IPMI_NUM_STATS; i++)
26944- atomic_set(&intf->stats[i], 0);
26945+ atomic_set_unchecked(&intf->stats[i], 0);
26946
26947 intf->proc_dir = NULL;
26948
15a11c5b 26949@@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
66a7e928
MT
26950 struct ipmi_smi_msg smi_msg;
26951 struct ipmi_recv_msg recv_msg;
26952
26953+ pax_track_stack();
26954+
26955 si = (struct ipmi_system_interface_addr *) &addr;
26956 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26957 si->channel = IPMI_BMC_CHANNEL;
fe2de317
MT
26958diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
26959index 9397ab4..d01bee1 100644
26960--- a/drivers/char/ipmi/ipmi_si_intf.c
26961+++ b/drivers/char/ipmi/ipmi_si_intf.c
15a11c5b 26962@@ -277,7 +277,7 @@ struct smi_info {
58c5fc13
MT
26963 unsigned char slave_addr;
26964
26965 /* Counters and things for the proc filesystem. */
26966- atomic_t stats[SI_NUM_STATS];
26967+ atomic_unchecked_t stats[SI_NUM_STATS];
26968
26969 struct task_struct *thread;
26970
15a11c5b 26971@@ -286,9 +286,9 @@ struct smi_info {
58c5fc13
MT
26972 };
26973
26974 #define smi_inc_stat(smi, stat) \
26975- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26976+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26977 #define smi_get_stat(smi, stat) \
26978- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26979+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26980
26981 #define SI_MAX_PARMS 4
26982
fe2de317 26983@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
58c5fc13
MT
26984 atomic_set(&new_smi->req_events, 0);
26985 new_smi->run_to_completion = 0;
26986 for (i = 0; i < SI_NUM_STATS; i++)
26987- atomic_set(&new_smi->stats[i], 0);
26988+ atomic_set_unchecked(&new_smi->stats[i], 0);
26989
57199397 26990 new_smi->interrupt_disabled = 1;
58c5fc13 26991 atomic_set(&new_smi->stop_operation, 0);
fe2de317
MT
26992diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
26993index 1aeaaba..e018570 100644
26994--- a/drivers/char/mbcs.c
26995+++ b/drivers/char/mbcs.c
26996@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
6e9df6a3
MT
26997 return 0;
26998 }
26999
27000-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27001+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27002 {
27003 .part_num = MBCS_PART_NUM,
27004 .mfg_num = MBCS_MFG_NUM,
fe2de317
MT
27005diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27006index 8fc04b4..cebdeec 100644
27007--- a/drivers/char/mem.c
27008+++ b/drivers/char/mem.c
58c5fc13
MT
27009@@ -18,6 +18,7 @@
27010 #include <linux/raw.h>
27011 #include <linux/tty.h>
27012 #include <linux/capability.h>
27013+#include <linux/security.h>
27014 #include <linux/ptrace.h>
27015 #include <linux/device.h>
27016 #include <linux/highmem.h>
ae4e228f 27017@@ -34,6 +35,10 @@
58c5fc13
MT
27018 # include <linux/efi.h>
27019 #endif
27020
27021+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
6e9df6a3 27022+extern const struct file_operations grsec_fops;
58c5fc13
MT
27023+#endif
27024+
ae4e228f
MT
27025 static inline unsigned long size_inside_page(unsigned long start,
27026 unsigned long size)
27027 {
fe2de317 27028@@ -65,9 +70,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
71d190be
MT
27029
27030 while (cursor < to) {
27031 if (!devmem_is_allowed(pfn)) {
27032+#ifdef CONFIG_GRKERNSEC_KMEM
27033+ gr_handle_mem_readwrite(from, to);
27034+#else
27035 printk(KERN_INFO
27036 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27037 current->comm, from, to);
27038+#endif
27039 return 0;
27040 }
27041 cursor += PAGE_SIZE;
fe2de317 27042@@ -75,6 +84,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
71d190be
MT
27043 }
27044 return 1;
27045 }
27046+#elif defined(CONFIG_GRKERNSEC_KMEM)
27047+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27048+{
27049+ return 0;
27050+}
27051 #else
27052 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27053 {
fe2de317 27054@@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
57199397
MT
27055
27056 while (count > 0) {
27057 unsigned long remaining;
27058+ char *temp;
27059
27060 sz = size_inside_page(p, count);
27061
fe2de317 27062@@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
57199397
MT
27063 if (!ptr)
27064 return -EFAULT;
27065
27066- remaining = copy_to_user(buf, ptr, sz);
27067+#ifdef CONFIG_PAX_USERCOPY
27068+ temp = kmalloc(sz, GFP_KERNEL);
27069+ if (!temp) {
27070+ unxlate_dev_mem_ptr(p, ptr);
27071+ return -ENOMEM;
27072+ }
27073+ memcpy(temp, ptr, sz);
27074+#else
27075+ temp = ptr;
27076+#endif
27077+
27078+ remaining = copy_to_user(buf, temp, sz);
27079+
27080+#ifdef CONFIG_PAX_USERCOPY
27081+ kfree(temp);
27082+#endif
27083+
27084 unxlate_dev_mem_ptr(p, ptr);
27085 if (remaining)
27086 return -EFAULT;
fe2de317 27087@@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
57199397
MT
27088 size_t count, loff_t *ppos)
27089 {
27090 unsigned long p = *ppos;
27091- ssize_t low_count, read, sz;
27092+ ssize_t low_count, read, sz, err = 0;
27093 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27094- int err = 0;
27095
27096 read = 0;
27097 if (p < (unsigned long) high_memory) {
fe2de317 27098@@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
57199397
MT
27099 }
27100 #endif
27101 while (low_count > 0) {
27102+ char *temp;
27103+
27104 sz = size_inside_page(p, low_count);
27105
27106 /*
fe2de317 27107@@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
57199397
MT
27108 */
27109 kbuf = xlate_dev_kmem_ptr((char *)p);
27110
27111- if (copy_to_user(buf, kbuf, sz))
27112+#ifdef CONFIG_PAX_USERCOPY
27113+ temp = kmalloc(sz, GFP_KERNEL);
27114+ if (!temp)
27115+ return -ENOMEM;
27116+ memcpy(temp, kbuf, sz);
27117+#else
27118+ temp = kbuf;
27119+#endif
27120+
27121+ err = copy_to_user(buf, temp, sz);
27122+
27123+#ifdef CONFIG_PAX_USERCOPY
27124+ kfree(temp);
27125+#endif
27126+
27127+ if (err)
27128 return -EFAULT;
27129 buf += sz;
27130 p += sz;
15a11c5b 27131@@ -866,6 +913,9 @@ static const struct memdev {
58c5fc13 27132 #ifdef CONFIG_CRASH_DUMP
ae4e228f 27133 [12] = { "oldmem", 0, &oldmem_fops, NULL },
58c5fc13
MT
27134 #endif
27135+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
ae4e228f 27136+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
58c5fc13
MT
27137+#endif
27138 };
27139
27140 static int memory_open(struct inode *inode, struct file *filp)
fe2de317
MT
27141diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27142index da3cfee..a5a6606 100644
27143--- a/drivers/char/nvram.c
27144+++ b/drivers/char/nvram.c
27145@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
ae4e228f
MT
27146
27147 spin_unlock_irq(&rtc_lock);
58c5fc13 27148
ae4e228f
MT
27149- if (copy_to_user(buf, contents, tmp - contents))
27150+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27151 return -EFAULT;
27152
27153 *ppos = i;
fe2de317
MT
27154diff --git a/drivers/char/random.c b/drivers/char/random.c
27155index c35a785..6d82202 100644
27156--- a/drivers/char/random.c
27157+++ b/drivers/char/random.c
66a7e928 27158@@ -261,8 +261,13 @@
58c5fc13
MT
27159 /*
27160 * Configuration information
27161 */
27162+#ifdef CONFIG_GRKERNSEC_RANDNET
27163+#define INPUT_POOL_WORDS 512
27164+#define OUTPUT_POOL_WORDS 128
27165+#else
27166 #define INPUT_POOL_WORDS 128
27167 #define OUTPUT_POOL_WORDS 32
27168+#endif
27169 #define SEC_XFER_SIZE 512
57199397 27170 #define EXTRACT_SIZE 10
58c5fc13 27171
66a7e928 27172@@ -300,10 +305,17 @@ static struct poolinfo {
58c5fc13
MT
27173 int poolwords;
27174 int tap1, tap2, tap3, tap4, tap5;
27175 } poolinfo_table[] = {
27176+#ifdef CONFIG_GRKERNSEC_RANDNET
27177+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27178+ { 512, 411, 308, 208, 104, 1 },
27179+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27180+ { 128, 103, 76, 51, 25, 1 },
27181+#else
27182 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27183 { 128, 103, 76, 51, 25, 1 },
27184 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27185 { 32, 26, 20, 14, 7, 1 },
27186+#endif
27187 #if 0
27188 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27189 { 2048, 1638, 1231, 819, 411, 1 },
fe2de317 27190@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
ae4e228f
MT
27191
27192 extract_buf(r, tmp);
27193 i = min_t(int, nbytes, EXTRACT_SIZE);
27194- if (copy_to_user(buf, tmp, i)) {
27195+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27196 ret = -EFAULT;
27197 break;
27198 }
66a7e928 27199@@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
58c5fc13
MT
27200 #include <linux/sysctl.h>
27201
27202 static int min_read_thresh = 8, min_write_thresh;
27203-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27204+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27205 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27206 static char sysctl_bootid[16];
27207
fe2de317
MT
27208diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27209index 1ee8ce7..b778bef 100644
27210--- a/drivers/char/sonypi.c
27211+++ b/drivers/char/sonypi.c
c52201e0
MT
27212@@ -55,6 +55,7 @@
27213 #include <asm/uaccess.h>
27214 #include <asm/io.h>
27215 #include <asm/system.h>
27216+#include <asm/local.h>
27217
27218 #include <linux/sonypi.h>
27219
27220@@ -491,7 +492,7 @@ static struct sonypi_device {
58c5fc13
MT
27221 spinlock_t fifo_lock;
27222 wait_queue_head_t fifo_proc_list;
27223 struct fasync_struct *fifo_async;
27224- int open_count;
c52201e0 27225+ local_t open_count;
58c5fc13
MT
27226 int model;
27227 struct input_dev *input_jog_dev;
27228 struct input_dev *input_key_dev;
fe2de317 27229@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
58c5fc13
MT
27230 static int sonypi_misc_release(struct inode *inode, struct file *file)
27231 {
27232 mutex_lock(&sonypi_device.lock);
27233- sonypi_device.open_count--;
c52201e0 27234+ local_dec(&sonypi_device.open_count);
58c5fc13
MT
27235 mutex_unlock(&sonypi_device.lock);
27236 return 0;
27237 }
fe2de317 27238@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
ae4e228f 27239 {
58c5fc13
MT
27240 mutex_lock(&sonypi_device.lock);
27241 /* Flush input queue on first open */
27242- if (!sonypi_device.open_count)
c52201e0 27243+ if (!local_read(&sonypi_device.open_count))
ae4e228f 27244 kfifo_reset(&sonypi_device.fifo);
58c5fc13 27245- sonypi_device.open_count++;
c52201e0 27246+ local_inc(&sonypi_device.open_count);
58c5fc13 27247 mutex_unlock(&sonypi_device.lock);
ae4e228f 27248
58c5fc13 27249 return 0;
fe2de317
MT
27250diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27251index 9ca5c02..7ce352c 100644
27252--- a/drivers/char/tpm/tpm.c
27253+++ b/drivers/char/tpm/tpm.c
27254@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27255 chip->vendor.req_complete_val)
27256 goto out_recv;
27257
27258- if ((status == chip->vendor.req_canceled)) {
27259+ if (status == chip->vendor.req_canceled) {
27260 dev_err(chip->dev, "Operation Canceled\n");
27261 rc = -ECANCELED;
27262 goto out;
27263@@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
27264
27265 struct tpm_chip *chip = dev_get_drvdata(dev);
27266
27267+ pax_track_stack();
27268+
27269 tpm_cmd.header.in = tpm_readpubek_header;
27270 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27271 "attempting to read the PUBEK");
27272diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27273index 0636520..169c1d0 100644
27274--- a/drivers/char/tpm/tpm_bios.c
27275+++ b/drivers/char/tpm/tpm_bios.c
27276@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
ae4e228f
MT
27277 event = addr;
27278
27279 if ((event->event_type == 0 && event->event_size == 0) ||
27280- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27281+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27282 return NULL;
27283
27284 return addr;
fe2de317 27285@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
ae4e228f
MT
27286 return NULL;
27287
27288 if ((event->event_type == 0 && event->event_size == 0) ||
27289- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27290+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27291 return NULL;
27292
27293 (*pos)++;
fe2de317 27294@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
ae4e228f
MT
27295 int i;
27296
27297 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27298- seq_putc(m, data[i]);
27299+ if (!seq_putc(m, data[i]))
27300+ return -EFAULT;
27301
58c5fc13
MT
27302 return 0;
27303 }
fe2de317 27304@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
ae4e228f 27305 log->bios_event_log_end = log->bios_event_log + len;
58c5fc13 27306
ae4e228f
MT
27307 virt = acpi_os_map_memory(start, len);
27308+ if (!virt) {
27309+ kfree(log->bios_event_log);
27310+ log->bios_event_log = NULL;
27311+ return -EFAULT;
27312+ }
27313
6e9df6a3
MT
27314- memcpy(log->bios_event_log, virt, len);
27315+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
ae4e228f 27316
6e9df6a3
MT
27317 acpi_os_unmap_memory(virt, len);
27318 return 0;
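
The two measurement-log hunks above rewrite the bounds check so that the untrusted event_size is compared against the space remaining between addr (or v) and limit instead of being added to the pointer first; with the original form a huge event_size could wrap the sum past limit and defeat the check. The read_log hunk additionally bails out (and frees the log buffer) when acpi_os_map_memory() fails instead of copying from a NULL mapping. A hypothetical helper expressing the same bounds idea (not part of the patch):

static bool example_event_fits(const void *addr, const void *limit,
			       size_t hdr_size, u32 ev_size)
{
	size_t room = (const char *)limit - (const char *)addr;

	/* the header must fit, and the payload must fit in what is left */
	return room > hdr_size && ev_size < room - hdr_size;
}
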
fe2de317
MT
27319diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27320index fb68b12..0f6c6ca 100644
27321--- a/drivers/char/virtio_console.c
27322+++ b/drivers/char/virtio_console.c
27323@@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
6e9df6a3
MT
27324 if (to_user) {
27325 ssize_t ret;
27326
27327- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27328+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27329 if (ret)
27330 return -EFAULT;
27331 } else {
fe2de317 27332@@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
6e9df6a3
MT
27333 if (!port_has_data(port) && !port->host_connected)
27334 return 0;
27335
27336- return fill_readbuf(port, ubuf, count, true);
27337+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27338 }
27339
27340 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
fe2de317
MT
27341diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
27342index a84250a..68c725e 100644
27343--- a/drivers/crypto/hifn_795x.c
27344+++ b/drivers/crypto/hifn_795x.c
27345@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
66a7e928
MT
27346 0xCA, 0x34, 0x2B, 0x2E};
27347 struct scatterlist sg;
27348
27349+ pax_track_stack();
27350+
27351 memset(src, 0, sizeof(src));
27352 memset(ctx.key, 0, sizeof(ctx.key));
27353
fe2de317
MT
27354diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
27355index db33d30..7823369 100644
27356--- a/drivers/crypto/padlock-aes.c
27357+++ b/drivers/crypto/padlock-aes.c
27358@@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
66a7e928
MT
27359 struct crypto_aes_ctx gen_aes;
27360 int cpu;
27361
27362+ pax_track_stack();
27363+
27364 if (key_len % 8) {
27365 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27366 return -EINVAL;
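
This hunk and the similar ones elsewhere in the patch (tpm_show_pubek, hifn_test, and later fw_run_transaction, drm_crtc_helper_set_mode, atom_parse) drop a pax_track_stack() call at the top of functions with unusually large stack frames so PaX's stack-usage instrumentation can account for the deep frame. A hedged guess at the no-op fallback when the feature is disabled (the real definition lives in the PaX headers and may differ):

#ifdef CONFIG_PAX_MEMORY_STACKLEAK
extern void pax_track_stack(void);
#else
#define pax_track_stack() do { } while (0)
#endif
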
fe2de317
MT
27367diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27368index 9a8bebc..b1e4989 100644
27369--- a/drivers/edac/amd64_edac.c
27370+++ b/drivers/edac/amd64_edac.c
27371@@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
6e9df6a3
MT
27372 * PCI core identifies what devices are on a system during boot, and then
27373 * inquiry this table to see if this driver is for a given device found.
27374 */
27375-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27376+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27377 {
27378 .vendor = PCI_VENDOR_ID_AMD,
27379 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
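
This and the following EDAC hunks all make the same one-line change: PCI ID tables that are already declared const were annotated __devinitdata (writable device-init data), and the patch switches them to __devinitconst so the const data lands in a read-only init section that matches the qualifier. An illustrative table with a hypothetical device ID:

static const struct pci_device_id example_pci_tbl[] __devinitconst = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },	/* hypothetical ID */
	{ 0, }						/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
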
fe2de317
MT
27380diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27381index e47e73b..348e0bd 100644
27382--- a/drivers/edac/amd76x_edac.c
27383+++ b/drivers/edac/amd76x_edac.c
27384@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27385 edac_mc_free(mci);
27386 }
27387
27388-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27389+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27390 {
27391 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27392 AMD762},
fe2de317
MT
27393diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27394index 1af531a..3a8ff27 100644
27395--- a/drivers/edac/e752x_edac.c
27396+++ b/drivers/edac/e752x_edac.c
27397@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27398 edac_mc_free(mci);
27399 }
27400
27401-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27402+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27403 {
27404 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27405 E7520},
fe2de317
MT
27406diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27407index 6ffb6d2..383d8d7 100644
27408--- a/drivers/edac/e7xxx_edac.c
27409+++ b/drivers/edac/e7xxx_edac.c
27410@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27411 edac_mc_free(mci);
27412 }
27413
27414-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27415+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27416 {
27417 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27418 E7205},
fe2de317
MT
27419diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27420index 495198a..ac08c85 100644
27421--- a/drivers/edac/edac_pci_sysfs.c
27422+++ b/drivers/edac/edac_pci_sysfs.c
27423@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
8308f9c9
MT
27424 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27425 static int edac_pci_poll_msec = 1000; /* one second workq period */
27426
27427-static atomic_t pci_parity_count = ATOMIC_INIT(0);
27428-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27429+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27430+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27431
27432 static struct kobject *edac_pci_top_main_kobj;
27433 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
fe2de317 27434@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
27435 edac_printk(KERN_CRIT, EDAC_PCI,
27436 "Signaled System Error on %s\n",
27437 pci_name(dev));
27438- atomic_inc(&pci_nonparity_count);
27439+ atomic_inc_unchecked(&pci_nonparity_count);
27440 }
27441
27442 if (status & (PCI_STATUS_PARITY)) {
fe2de317 27443@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
27444 "Master Data Parity Error on %s\n",
27445 pci_name(dev));
27446
27447- atomic_inc(&pci_parity_count);
27448+ atomic_inc_unchecked(&pci_parity_count);
27449 }
27450
27451 if (status & (PCI_STATUS_DETECTED_PARITY)) {
fe2de317 27452@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
27453 "Detected Parity Error on %s\n",
27454 pci_name(dev));
27455
27456- atomic_inc(&pci_parity_count);
27457+ atomic_inc_unchecked(&pci_parity_count);
27458 }
27459 }
27460
fe2de317 27461@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
27462 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27463 "Signaled System Error on %s\n",
27464 pci_name(dev));
27465- atomic_inc(&pci_nonparity_count);
27466+ atomic_inc_unchecked(&pci_nonparity_count);
27467 }
27468
27469 if (status & (PCI_STATUS_PARITY)) {
fe2de317 27470@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
27471 "Master Data Parity Error on "
27472 "%s\n", pci_name(dev));
27473
27474- atomic_inc(&pci_parity_count);
27475+ atomic_inc_unchecked(&pci_parity_count);
27476 }
27477
27478 if (status & (PCI_STATUS_DETECTED_PARITY)) {
fe2de317 27479@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
8308f9c9
MT
27480 "Detected Parity Error on %s\n",
27481 pci_name(dev));
27482
27483- atomic_inc(&pci_parity_count);
27484+ atomic_inc_unchecked(&pci_parity_count);
27485 }
27486 }
27487 }
27488@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27489 if (!check_pci_errors)
27490 return;
27491
27492- before_count = atomic_read(&pci_parity_count);
27493+ before_count = atomic_read_unchecked(&pci_parity_count);
27494
27495 /* scan all PCI devices looking for a Parity Error on devices and
27496 * bridges.
27497@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27498 /* Only if operator has selected panic on PCI Error */
27499 if (edac_pci_get_panic_on_pe()) {
27500 /* If the count is different 'after' from 'before' */
27501- if (before_count != atomic_read(&pci_parity_count))
27502+ if (before_count != atomic_read_unchecked(&pci_parity_count))
27503 panic("EDAC: PCI Parity Error");
27504 }
27505 }
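
The edac_pci_sysfs counters above are pure statistics, so the patch retypes them as atomic_unchecked_t, grsecurity's atomic variant that is exempt from the PaX reference-count overflow protection: a wrapping error counter is harmless, whereas saturating it would hide real events. A simplified sketch of how the _unchecked operations can fall back to ordinary atomics when that protection is compiled out (an assumption; the real PaX headers differ in detail):

typedef struct { int counter; } atomic_unchecked_t;

#define atomic_read_unchecked(v)	atomic_read((const atomic_t *)(v))
#define atomic_set_unchecked(v, i)	atomic_set((atomic_t *)(v), (i))
#define atomic_inc_unchecked(v)		atomic_inc((atomic_t *)(v))
#define atomic_add_unchecked(i, v)	atomic_add((i), (atomic_t *)(v))
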
fe2de317
MT
27506diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27507index c0510b3..6e2a954 100644
27508--- a/drivers/edac/i3000_edac.c
27509+++ b/drivers/edac/i3000_edac.c
27510@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27511 edac_mc_free(mci);
27512 }
27513
27514-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27515+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27516 {
27517 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27518 I3000},
fe2de317
MT
27519diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27520index aa08497..7e6822a 100644
27521--- a/drivers/edac/i3200_edac.c
27522+++ b/drivers/edac/i3200_edac.c
27523@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27524 edac_mc_free(mci);
27525 }
27526
27527-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27528+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27529 {
27530 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27531 I3200},
fe2de317
MT
27532diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27533index 4dc3ac2..67d05a6 100644
27534--- a/drivers/edac/i5000_edac.c
27535+++ b/drivers/edac/i5000_edac.c
27536@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27537 *
27538 * The "E500P" device is the first device supported.
27539 */
27540-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27541+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27542 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27543 .driver_data = I5000P},
27544
fe2de317
MT
27545diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27546index bcbdeec..9886d16 100644
27547--- a/drivers/edac/i5100_edac.c
27548+++ b/drivers/edac/i5100_edac.c
27549@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27550 edac_mc_free(mci);
27551 }
27552
27553-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27554+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27555 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27556 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27557 { 0, }
fe2de317
MT
27558diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27559index 74d6ec34..baff517 100644
27560--- a/drivers/edac/i5400_edac.c
27561+++ b/drivers/edac/i5400_edac.c
27562@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27563 *
27564 * The "E500P" device is the first device supported.
27565 */
27566-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27567+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27568 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27569 {0,} /* 0 terminated list. */
27570 };
fe2de317
MT
27571diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27572index a76fe83..15479e6 100644
27573--- a/drivers/edac/i7300_edac.c
27574+++ b/drivers/edac/i7300_edac.c
27575@@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27576 *
27577 * Has only 8086:360c PCI ID
27578 */
27579-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27580+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27581 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27582 {0,} /* 0 terminated list. */
27583 };
fe2de317
MT
27584diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27585index f6cf448..3f612e9 100644
27586--- a/drivers/edac/i7core_edac.c
27587+++ b/drivers/edac/i7core_edac.c
27588@@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev_table[] = {
6e9df6a3
MT
27589 /*
27590 * pci_device_id table for which devices we are looking for
27591 */
27592-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27593+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27594 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27595 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27596 {0,} /* 0 terminated list. */
fe2de317
MT
27597diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27598index 4329d39..f3022ef 100644
27599--- a/drivers/edac/i82443bxgx_edac.c
27600+++ b/drivers/edac/i82443bxgx_edac.c
27601@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27602
27603 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27604
27605-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27606+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27607 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27608 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27609 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
fe2de317
MT
27610diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27611index 931a057..fd28340 100644
27612--- a/drivers/edac/i82860_edac.c
27613+++ b/drivers/edac/i82860_edac.c
27614@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27615 edac_mc_free(mci);
27616 }
27617
27618-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27619+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27620 {
27621 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27622 I82860},
fe2de317
MT
27623diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27624index 33864c6..01edc61 100644
27625--- a/drivers/edac/i82875p_edac.c
27626+++ b/drivers/edac/i82875p_edac.c
27627@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27628 edac_mc_free(mci);
27629 }
27630
27631-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27632+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27633 {
27634 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27635 I82875P},
fe2de317
MT
27636diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27637index a5da732..983363b 100644
27638--- a/drivers/edac/i82975x_edac.c
27639+++ b/drivers/edac/i82975x_edac.c
27640@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27641 edac_mc_free(mci);
27642 }
27643
27644-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27645+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27646 {
27647 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27648 I82975X
fe2de317
MT
27649diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27650index 795a320..3bbc3d3 100644
27651--- a/drivers/edac/mce_amd.h
27652+++ b/drivers/edac/mce_amd.h
15a11c5b
MT
27653@@ -83,7 +83,7 @@ struct amd_decoder_ops {
27654 bool (*dc_mce)(u16, u8);
27655 bool (*ic_mce)(u16, u8);
27656 bool (*nb_mce)(u16, u8);
27657-};
27658+} __no_const;
27659
27660 void amd_report_gart_errors(bool);
27661 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
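
struct amd_decoder_ops holds nothing but function pointers, the kind of structure the grsecurity constify plugin would otherwise force to be const; tagging the type __no_const keeps instances writable so the driver can still pick decoder callbacks at runtime (the fw_card_driver_no_const typedef further down serves the same purpose for an on-stack dummy driver). A rough illustration with hypothetical names:

struct example_ops {
	int  (*dc_mce)(u16 node, u8 bank);
	void (*teardown)(u16 node);
} __no_const;				/* instances stay writable */

static int  k8_dc_mce(u16 node, u8 bank) { return 0; }
static void k8_teardown(u16 node)        { }

static struct example_ops example_ops;	/* assigned once the CPU family is known */

static void example_bind(void)
{
	example_ops.dc_mce   = k8_dc_mce;
	example_ops.teardown = k8_teardown;
}
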
fe2de317
MT
27662diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27663index b153674..ad2ba9b 100644
27664--- a/drivers/edac/r82600_edac.c
27665+++ b/drivers/edac/r82600_edac.c
27666@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27667 edac_mc_free(mci);
27668 }
27669
27670-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27671+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27672 {
27673 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27674 },
fe2de317
MT
27675diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27676index b6f47de..c5acf3a 100644
27677--- a/drivers/edac/x38_edac.c
27678+++ b/drivers/edac/x38_edac.c
27679@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
6e9df6a3
MT
27680 edac_mc_free(mci);
27681 }
27682
27683-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27684+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27685 {
27686 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27687 X38},
fe2de317
MT
27688diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27689index 85661b0..c784559a 100644
27690--- a/drivers/firewire/core-card.c
27691+++ b/drivers/firewire/core-card.c
15a11c5b
MT
27692@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27693
27694 void fw_core_remove_card(struct fw_card *card)
27695 {
27696- struct fw_card_driver dummy_driver = dummy_driver_template;
27697+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
27698
27699 card->driver->update_phy_reg(card, 4,
27700 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fe2de317
MT
27701diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27702index 4799393..37bd3ab 100644
27703--- a/drivers/firewire/core-cdev.c
27704+++ b/drivers/firewire/core-cdev.c
27705@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
df50ba0c 27706 int ret;
ae4e228f 27707
df50ba0c
MT
27708 if ((request->channels == 0 && request->bandwidth == 0) ||
27709- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27710- request->bandwidth < 0)
27711+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27712 return -EINVAL;
ae4e228f 27713
df50ba0c 27714 r = kmalloc(sizeof(*r), GFP_KERNEL);
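
The init_iso_resource hunk drops the request->bandwidth < 0 test; the removal only makes sense if the ioctl field is unsigned (as the surviving upper-bound check assumes), in which case the negative comparison is dead code. A hypothetical mirror of the check that remains:

struct example_iso_request {
	__u32 channels;
	__u32 bandwidth;	/* unsigned, so "< 0" can never be true */
};

static int example_validate(const struct example_iso_request *req, u32 max_bw)
{
	if ((req->channels == 0 && req->bandwidth == 0) ||
	    req->bandwidth > max_bw)
		return -EINVAL;
	return 0;
}
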
fe2de317
MT
27715diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27716index 334b82a..ea5261d 100644
27717--- a/drivers/firewire/core-transaction.c
27718+++ b/drivers/firewire/core-transaction.c
15a11c5b 27719@@ -37,6 +37,7 @@
66a7e928
MT
27720 #include <linux/timer.h>
27721 #include <linux/types.h>
15a11c5b 27722 #include <linux/workqueue.h>
66a7e928
MT
27723+#include <linux/sched.h>
27724
27725 #include <asm/byteorder.h>
27726
fe2de317 27727@@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
66a7e928
MT
27728 struct transaction_callback_data d;
27729 struct fw_transaction t;
27730
27731+ pax_track_stack();
27732+
27733 init_timer_on_stack(&t.split_timeout_timer);
27734 init_completion(&d.done);
27735 d.payload = payload;
fe2de317
MT
27736diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27737index b45be57..5fad18b 100644
27738--- a/drivers/firewire/core.h
27739+++ b/drivers/firewire/core.h
27740@@ -101,6 +101,7 @@ struct fw_card_driver {
27741
27742 int (*stop_iso)(struct fw_iso_context *ctx);
27743 };
27744+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27745
27746 void fw_card_initialize(struct fw_card *card,
27747 const struct fw_card_driver *driver, struct device *device);
27748diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27749index bcb1126..2cc2121 100644
27750--- a/drivers/firmware/dmi_scan.c
27751+++ b/drivers/firmware/dmi_scan.c
c52201e0 27752@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
58c5fc13
MT
27753 }
27754 }
27755 else {
27756- /*
27757- * no iounmap() for that ioremap(); it would be a no-op, but
27758- * it's so early in setup that sucker gets confused into doing
27759- * what it shouldn't if we actually call it.
27760- */
27761 p = dmi_ioremap(0xF0000, 0x10000);
27762 if (p == NULL)
27763 goto error;
fe2de317 27764@@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
6e9df6a3
MT
27765 if (buf == NULL)
27766 return -1;
27767
27768- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27769+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27770
27771 iounmap(buf);
27772 return 0;
fe2de317
MT
27773diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27774index 98723cb..10ca85b 100644
27775--- a/drivers/gpio/gpio-vr41xx.c
27776+++ b/drivers/gpio/gpio-vr41xx.c
8308f9c9
MT
27777@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27778 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27779 maskl, pendl, maskh, pendh);
27780
27781- atomic_inc(&irq_err_count);
27782+ atomic_inc_unchecked(&irq_err_count);
27783
27784 return -EINVAL;
27785 }
fe2de317
MT
27786diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27787index 2410c40..2d03563 100644
27788--- a/drivers/gpu/drm/drm_crtc.c
27789+++ b/drivers/gpu/drm/drm_crtc.c
27790@@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
6e9df6a3
MT
27791 */
27792 if ((out_resp->count_modes >= mode_count) && mode_count) {
27793 copied = 0;
27794- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27795+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27796 list_for_each_entry(mode, &connector->modes, head) {
27797 drm_crtc_convert_to_umode(&u_mode, mode);
27798 if (copy_to_user(mode_ptr + copied,
fe2de317 27799@@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
6e9df6a3
MT
27800
27801 if ((out_resp->count_props >= props_count) && props_count) {
27802 copied = 0;
27803- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27804- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27805+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27806+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27807 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27808 if (connector->property_ids[i] != 0) {
27809 if (put_user(connector->property_ids[i],
fe2de317 27810@@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
6e9df6a3
MT
27811
27812 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27813 copied = 0;
27814- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27815+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27816 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27817 if (connector->encoder_ids[i] != 0) {
27818 if (put_user(connector->encoder_ids[i],
fe2de317 27819@@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
6e9df6a3
MT
27820 }
27821
27822 for (i = 0; i < crtc_req->count_connectors; i++) {
27823- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27824+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27825 if (get_user(out_id, &set_connectors_ptr[i])) {
27826 ret = -EFAULT;
27827 goto out;
fe2de317 27828@@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
6e9df6a3
MT
27829 fb = obj_to_fb(obj);
27830
27831 num_clips = r->num_clips;
27832- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27833+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27834
27835 if (!num_clips != !clips_ptr) {
27836 ret = -EINVAL;
fe2de317 27837@@ -2276,7 +2276,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
6e9df6a3
MT
27838 out_resp->flags = property->flags;
27839
27840 if ((out_resp->count_values >= value_count) && value_count) {
27841- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27842+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27843 for (i = 0; i < value_count; i++) {
27844 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27845 ret = -EFAULT;
fe2de317 27846@@ -2289,7 +2289,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
6e9df6a3
MT
27847 if (property->flags & DRM_MODE_PROP_ENUM) {
27848 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27849 copied = 0;
27850- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27851+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27852 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27853
27854 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
fe2de317 27855@@ -2312,7 +2312,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
6e9df6a3
MT
27856 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27857 copied = 0;
27858 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27859- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27860+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27861
27862 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27863 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
fe2de317 27864@@ -2373,7 +2373,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
6e9df6a3
MT
27865 struct drm_mode_get_blob *out_resp = data;
27866 struct drm_property_blob *blob;
27867 int ret = 0;
27868- void *blob_ptr;
27869+ void __user *blob_ptr;
27870
27871 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27872 return -EINVAL;
fe2de317 27873@@ -2387,7 +2387,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
6e9df6a3
MT
27874 blob = obj_to_blob(obj);
27875
27876 if (out_resp->length == blob->length) {
27877- blob_ptr = (void *)(unsigned long)out_resp->data;
27878+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
27879 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27880 ret = -EFAULT;
27881 goto done;
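
The drm_crtc hunks above only add sparse annotations: userspace addresses carried as integers in the mode-setting ioctl structs are cast back with __user, so every later access has to go through copy_to_user()/put_user(). A hypothetical ioctl payload showing the same pattern:

struct example_ioctl_arg {
	__u64 values_ptr;	/* userspace address of a u32 array */
	__u32 count;
};

static int example_copy_out(const struct example_ioctl_arg *arg, const u32 *vals)
{
	u32 __user *uptr = (u32 __user *)(unsigned long)arg->values_ptr;
	u32 i;

	for (i = 0; i < arg->count; i++)
		if (put_user(vals[i], uptr + i))
			return -EFAULT;
	return 0;
}
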
fe2de317
MT
27882diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27883index f88a9b2..8f4078f 100644
27884--- a/drivers/gpu/drm/drm_crtc_helper.c
27885+++ b/drivers/gpu/drm/drm_crtc_helper.c
27886@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
6892158b
MT
27887 struct drm_crtc *tmp;
27888 int crtc_mask = 1;
27889
bc901d79 27890- WARN(!crtc, "checking null crtc?\n");
6892158b
MT
27891+ BUG_ON(!crtc);
27892
27893 dev = crtc->dev;
27894
fe2de317 27895@@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
66a7e928
MT
27896 struct drm_encoder *encoder;
27897 bool ret = true;
27898
27899+ pax_track_stack();
27900+
27901 crtc->enabled = drm_helper_crtc_in_use(crtc);
27902 if (!crtc->enabled)
27903 return true;
fe2de317
MT
27904diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27905index 93a112d..c8b065d 100644
27906--- a/drivers/gpu/drm/drm_drv.c
27907+++ b/drivers/gpu/drm/drm_drv.c
6e9df6a3
MT
27908@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
27909 /**
27910 * Copy and IOCTL return string to user space
27911 */
27912-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27913+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27914 {
27915 int len;
27916
66a7e928 27917@@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
ae4e228f
MT
27918
27919 dev = file_priv->minor->dev;
58c5fc13
MT
27920 atomic_inc(&dev->ioctl_count);
27921- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27922+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27923 ++file_priv->ioctl_count;
27924
27925 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
fe2de317
MT
27926diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27927index 2ec7d48..be14bb1 100644
27928--- a/drivers/gpu/drm/drm_fops.c
27929+++ b/drivers/gpu/drm/drm_fops.c
27930@@ -70,7 +70,7 @@ static int drm_setup(struct drm_device * dev)
58c5fc13
MT
27931 }
27932
27933 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27934- atomic_set(&dev->counts[i], 0);
27935+ atomic_set_unchecked(&dev->counts[i], 0);
27936
27937 dev->sigdata.lock = NULL;
27938
fe2de317 27939@@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct file *filp)
58c5fc13
MT
27940
27941 retcode = drm_open_helper(inode, filp, dev);
27942 if (!retcode) {
27943- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
6892158b 27944- if (!dev->open_count++)
58c5fc13 27945+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
c52201e0 27946+ if (local_inc_return(&dev->open_count) == 1)
58c5fc13 27947 retcode = drm_setup(dev);
6892158b
MT
27948 }
27949 if (!retcode) {
fe2de317 27950@@ -472,7 +472,7 @@ int drm_release(struct inode *inode, struct file *filp)
58c5fc13 27951
6892158b 27952 mutex_lock(&drm_global_mutex);
58c5fc13
MT
27953
27954- DRM_DEBUG("open_count = %d\n", dev->open_count);
c52201e0 27955+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
58c5fc13
MT
27956
27957 if (dev->driver->preclose)
27958 dev->driver->preclose(dev, file_priv);
fe2de317 27959@@ -484,7 +484,7 @@ int drm_release(struct inode *inode, struct file *filp)
58c5fc13
MT
27960 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27961 task_pid_nr(current),
27962 (long)old_encode_dev(file_priv->minor->device),
27963- dev->open_count);
c52201e0 27964+ local_read(&dev->open_count));
58c5fc13
MT
27965
27966 /* if the master has gone away we can't do anything with the lock */
27967 if (file_priv->minor->master)
fe2de317 27968@@ -565,8 +565,8 @@ int drm_release(struct inode *inode, struct file *filp)
58c5fc13
MT
27969 * End inline drm_release
27970 */
27971
27972- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
58c5fc13 27973- if (!--dev->open_count) {
6892158b 27974+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
c52201e0 27975+ if (local_dec_and_test(&dev->open_count)) {
58c5fc13
MT
27976 if (atomic_read(&dev->ioctl_count)) {
27977 DRM_ERROR("Device busy: %d\n",
27978 atomic_read(&dev->ioctl_count));
fe2de317
MT
27979diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27980index c87dc96..326055d 100644
27981--- a/drivers/gpu/drm/drm_global.c
27982+++ b/drivers/gpu/drm/drm_global.c
6892158b
MT
27983@@ -36,7 +36,7 @@
27984 struct drm_global_item {
27985 struct mutex mutex;
27986 void *object;
27987- int refcount;
27988+ atomic_t refcount;
27989 };
27990
27991 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27992@@ -49,7 +49,7 @@ void drm_global_init(void)
27993 struct drm_global_item *item = &glob[i];
27994 mutex_init(&item->mutex);
27995 item->object = NULL;
27996- item->refcount = 0;
27997+ atomic_set(&item->refcount, 0);
27998 }
27999 }
28000
28001@@ -59,7 +59,7 @@ void drm_global_release(void)
28002 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28003 struct drm_global_item *item = &glob[i];
28004 BUG_ON(item->object != NULL);
28005- BUG_ON(item->refcount != 0);
28006+ BUG_ON(atomic_read(&item->refcount) != 0);
28007 }
28008 }
28009
fe2de317 28010@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
6892158b
MT
28011 void *object;
28012
28013 mutex_lock(&item->mutex);
28014- if (item->refcount == 0) {
28015+ if (atomic_read(&item->refcount) == 0) {
28016 item->object = kzalloc(ref->size, GFP_KERNEL);
28017 if (unlikely(item->object == NULL)) {
28018 ret = -ENOMEM;
fe2de317 28019@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
6892158b
MT
28020 goto out_err;
28021
28022 }
28023- ++item->refcount;
28024+ atomic_inc(&item->refcount);
28025 ref->object = item->object;
28026 object = item->object;
28027 mutex_unlock(&item->mutex);
fe2de317 28028@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
6892158b
MT
28029 struct drm_global_item *item = &glob[ref->global_type];
28030
28031 mutex_lock(&item->mutex);
28032- BUG_ON(item->refcount == 0);
28033+ BUG_ON(atomic_read(&item->refcount) == 0);
28034 BUG_ON(ref->object != item->object);
28035- if (--item->refcount == 0) {
28036+ if (atomic_dec_and_test(&item->refcount)) {
28037 ref->release(ref);
28038 item->object = NULL;
28039 }
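
The drm_global hunks turn the mutex-protected int refcount into an atomic_t, so taking and dropping a reference become explicit atomic operations and the final drop can use atomic_dec_and_test(). A minimal sketch of the resulting pattern (hypothetical names):

static atomic_t example_refcount = ATOMIC_INIT(0);

static bool example_get(void)
{
	/* returns true for the first user, who must allocate the shared object */
	return atomic_inc_return(&example_refcount) == 1;
}

static void example_put(void (*release)(void))
{
	if (atomic_dec_and_test(&example_refcount))
		release();	/* last user gone */
}
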
fe2de317
MT
28040diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28041index ab1162d..42587b2 100644
28042--- a/drivers/gpu/drm/drm_info.c
28043+++ b/drivers/gpu/drm/drm_info.c
28044@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
6892158b
MT
28045 struct drm_local_map *map;
28046 struct drm_map_list *r_list;
28047
28048- /* Hardcoded from _DRM_FRAME_BUFFER,
28049- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28050- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28051- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28052+ static const char * const types[] = {
28053+ [_DRM_FRAME_BUFFER] = "FB",
28054+ [_DRM_REGISTERS] = "REG",
28055+ [_DRM_SHM] = "SHM",
28056+ [_DRM_AGP] = "AGP",
28057+ [_DRM_SCATTER_GATHER] = "SG",
28058+ [_DRM_CONSISTENT] = "PCI",
28059+ [_DRM_GEM] = "GEM" };
28060 const char *type;
28061 int i;
28062
fe2de317 28063@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
6892158b
MT
28064 map = r_list->map;
28065 if (!map)
28066 continue;
28067- if (map->type < 0 || map->type > 5)
28068+ if (map->type >= ARRAY_SIZE(types))
28069 type = "??";
28070 else
28071 type = types[map->type];
fe2de317 28072@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
16454cff
MT
28073 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28074 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28075 vma->vm_flags & VM_IO ? 'i' : '-',
28076+#ifdef CONFIG_GRKERNSEC_HIDESYM
28077+ 0);
28078+#else
28079 vma->vm_pgoff);
28080+#endif
28081
28082 #if defined(__i386__)
28083 pgprot = pgprot_val(vma->vm_page_prot);
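
The first drm_info hunk replaces the positional string table with designated initializers keyed by the _DRM_* map types and bounds-checks against ARRAY_SIZE() instead of a hard-coded 5, so adding or reordering map types cannot desynchronize the names. A sketch of the same lookup (the extra NULL check is an added safeguard, not something the patch introduces):

static const char * const example_type_names[] = {
	[_DRM_FRAME_BUFFER]   = "FB",
	[_DRM_REGISTERS]      = "REG",
	[_DRM_SHM]            = "SHM",
	[_DRM_AGP]            = "AGP",
	[_DRM_SCATTER_GATHER] = "SG",
	[_DRM_CONSISTENT]     = "PCI",
	[_DRM_GEM]            = "GEM",
};

static const char *example_type_name(unsigned int type)
{
	if (type >= ARRAY_SIZE(example_type_names) || !example_type_names[type])
		return "??";
	return example_type_names[type];
}
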
fe2de317
MT
28084diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28085index 4a058c7..b42cd92 100644
28086--- a/drivers/gpu/drm/drm_ioc32.c
28087+++ b/drivers/gpu/drm/drm_ioc32.c
28088@@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
6e9df6a3
MT
28089 request = compat_alloc_user_space(nbytes);
28090 if (!access_ok(VERIFY_WRITE, request, nbytes))
28091 return -EFAULT;
28092- list = (struct drm_buf_desc *) (request + 1);
28093+ list = (struct drm_buf_desc __user *) (request + 1);
28094
28095 if (__put_user(count, &request->count)
28096 || __put_user(list, &request->list))
fe2de317 28097@@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
6e9df6a3
MT
28098 request = compat_alloc_user_space(nbytes);
28099 if (!access_ok(VERIFY_WRITE, request, nbytes))
28100 return -EFAULT;
28101- list = (struct drm_buf_pub *) (request + 1);
28102+ list = (struct drm_buf_pub __user *) (request + 1);
28103
28104 if (__put_user(count, &request->count)
28105 || __put_user(list, &request->list))
fe2de317
MT
28106diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28107index 904d7e9..ab88581 100644
28108--- a/drivers/gpu/drm/drm_ioctl.c
28109+++ b/drivers/gpu/drm/drm_ioctl.c
28110@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
58c5fc13
MT
28111 stats->data[i].value =
28112 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28113 else
28114- stats->data[i].value = atomic_read(&dev->counts[i]);
28115+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28116 stats->data[i].type = dev->types[i];
28117 }
28118
fe2de317
MT
28119diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28120index 632ae24..244cf4a 100644
28121--- a/drivers/gpu/drm/drm_lock.c
28122+++ b/drivers/gpu/drm/drm_lock.c
28123@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
58c5fc13
MT
28124 if (drm_lock_take(&master->lock, lock->context)) {
28125 master->lock.file_priv = file_priv;
28126 master->lock.lock_time = jiffies;
28127- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28128+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28129 break; /* Got lock */
28130 }
28131
fe2de317 28132@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
58c5fc13
MT
28133 return -EINVAL;
28134 }
28135
28136- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28137+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28138
bc901d79
MT
28139 if (drm_lock_free(&master->lock, lock->context)) {
28140 /* FIXME: Should really bail out here. */
fe2de317
MT
28141diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28142index 8f371e8..9f85d52 100644
28143--- a/drivers/gpu/drm/i810/i810_dma.c
28144+++ b/drivers/gpu/drm/i810/i810_dma.c
28145@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
58c5fc13
MT
28146 dma->buflist[vertex->idx],
28147 vertex->discard, vertex->used);
28148
28149- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28150- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28151+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28152+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28153 sarea_priv->last_enqueue = dev_priv->counter - 1;
28154 sarea_priv->last_dispatch = (int)hw_status[5];
28155
fe2de317 28156@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
58c5fc13
MT
28157 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28158 mc->last_render);
28159
28160- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28161- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28162+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28163+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28164 sarea_priv->last_enqueue = dev_priv->counter - 1;
28165 sarea_priv->last_dispatch = (int)hw_status[5];
28166
fe2de317
MT
28167diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28168index c9339f4..f5e1b9d 100644
28169--- a/drivers/gpu/drm/i810/i810_drv.h
28170+++ b/drivers/gpu/drm/i810/i810_drv.h
8308f9c9
MT
28171@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28172 int page_flipping;
28173
28174 wait_queue_head_t irq_queue;
28175- atomic_t irq_received;
28176- atomic_t irq_emitted;
28177+ atomic_unchecked_t irq_received;
28178+ atomic_unchecked_t irq_emitted;
28179
28180 int front_offset;
28181 } drm_i810_private_t;
fe2de317
MT
28182diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28183index 3c395a5..02889c2 100644
28184--- a/drivers/gpu/drm/i915/i915_debugfs.c
28185+++ b/drivers/gpu/drm/i915/i915_debugfs.c
28186@@ -497,7 +497,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
8308f9c9
MT
28187 I915_READ(GTIMR));
28188 }
28189 seq_printf(m, "Interrupts received: %d\n",
28190- atomic_read(&dev_priv->irq_received));
28191+ atomic_read_unchecked(&dev_priv->irq_received));
28192 for (i = 0; i < I915_NUM_RINGS; i++) {
6e9df6a3 28193 if (IS_GEN6(dev) || IS_GEN7(dev)) {
8308f9c9 28194 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
fe2de317 28195@@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
6e9df6a3
MT
28196 return ret;
28197
28198 if (opregion->header)
28199- seq_write(m, opregion->header, OPREGION_SIZE);
28200+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28201
28202 mutex_unlock(&dev->struct_mutex);
28203
fe2de317
MT
28204diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28205index 8a3942c..1b73bf1 100644
28206--- a/drivers/gpu/drm/i915/i915_dma.c
28207+++ b/drivers/gpu/drm/i915/i915_dma.c
28208@@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
efbe55a5
MT
28209 bool can_switch;
28210
28211 spin_lock(&dev->count_lock);
28212- can_switch = (dev->open_count == 0);
c52201e0 28213+ can_switch = (local_read(&dev->open_count) == 0);
efbe55a5
MT
28214 spin_unlock(&dev->count_lock);
28215 return can_switch;
28216 }
fe2de317
MT
28217diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28218index 7916bd9..7c17a0f 100644
28219--- a/drivers/gpu/drm/i915/i915_drv.h
28220+++ b/drivers/gpu/drm/i915/i915_drv.h
6e9df6a3 28221@@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
15a11c5b
MT
28222 /* render clock increase/decrease */
28223 /* display clock increase/decrease */
28224 /* pll clock increase/decrease */
28225-};
28226+} __no_const;
28227
28228 struct intel_device_info {
28229 u8 gen;
6e9df6a3 28230@@ -305,7 +305,7 @@ typedef struct drm_i915_private {
8308f9c9
MT
28231 int current_page;
28232 int page_flipping;
28233
28234- atomic_t irq_received;
28235+ atomic_unchecked_t irq_received;
8308f9c9
MT
28236
28237 /* protects the irq masks */
66a7e928 28238 spinlock_t irq_lock;
6e9df6a3 28239@@ -882,7 +882,7 @@ struct drm_i915_gem_object {
8308f9c9
MT
28240 * will be page flipped away on the next vblank. When it
28241 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28242 */
28243- atomic_t pending_flip;
28244+ atomic_unchecked_t pending_flip;
28245 };
28246
28247 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
fe2de317 28248@@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
71d190be
MT
28249 extern void intel_teardown_gmbus(struct drm_device *dev);
28250 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28251 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28252-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28253+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28254 {
28255 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28256 }
fe2de317
MT
28257diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28258index 4934cf8..1da9c84 100644
28259--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28260+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28261@@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
15a11c5b 28262 i915_gem_clflush_object(obj);
66a7e928
MT
28263
28264 if (obj->base.pending_write_domain)
28265- cd->flips |= atomic_read(&obj->pending_flip);
28266+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28267
28268 /* The actual obj->write_domain will be updated with
28269 * pending_write_domain after we emit the accumulated flush for all
fe2de317
MT
28270@@ -864,9 +864,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28271
28272 static int
28273 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28274- int count)
28275+ unsigned int count)
28276 {
28277- int i;
28278+ unsigned int i;
28279
28280 for (i = 0; i < count; i++) {
28281 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28282diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28283index 73248d0..f7bac29 100644
28284--- a/drivers/gpu/drm/i915/i915_irq.c
28285+++ b/drivers/gpu/drm/i915/i915_irq.c
28286@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
15a11c5b
MT
28287 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28288 struct drm_i915_master_private *master_priv;
28289
28290- atomic_inc(&dev_priv->irq_received);
28291+ atomic_inc_unchecked(&dev_priv->irq_received);
28292
28293 /* disable master interrupt before clearing iir */
28294 de_ier = I915_READ(DEIER);
fe2de317 28295@@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
15a11c5b
MT
28296 struct drm_i915_master_private *master_priv;
28297 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28298
28299- atomic_inc(&dev_priv->irq_received);
28300+ atomic_inc_unchecked(&dev_priv->irq_received);
28301
28302 if (IS_GEN6(dev))
28303 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
fe2de317 28304@@ -1229,7 +1229,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
66a7e928
MT
28305 int ret = IRQ_NONE, pipe;
28306 bool blc_event = false;
8308f9c9
MT
28307
28308- atomic_inc(&dev_priv->irq_received);
28309+ atomic_inc_unchecked(&dev_priv->irq_received);
28310
15a11c5b
MT
28311 iir = I915_READ(IIR);
28312
fe2de317 28313@@ -1741,7 +1741,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
15a11c5b
MT
28314 {
28315 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28316
28317- atomic_set(&dev_priv->irq_received, 0);
28318+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28319
28320 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28321 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
fe2de317 28322@@ -1905,7 +1905,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
8308f9c9 28323 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
66a7e928 28324 int pipe;
8308f9c9
MT
28325
28326- atomic_set(&dev_priv->irq_received, 0);
28327+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28328
28329 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28330 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
fe2de317
MT
28331diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28332index 07e7cf3..c75f312 100644
28333--- a/drivers/gpu/drm/i915/intel_display.c
28334+++ b/drivers/gpu/drm/i915/intel_display.c
28335@@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
8308f9c9
MT
28336
28337 wait_event(dev_priv->pending_flip_queue,
28338 atomic_read(&dev_priv->mm.wedged) ||
28339- atomic_read(&obj->pending_flip) == 0);
28340+ atomic_read_unchecked(&obj->pending_flip) == 0);
28341
28342 /* Big Hammer, we also need to ensure that any pending
28343 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
fe2de317 28344@@ -2826,7 +2826,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
8308f9c9
MT
28345 obj = to_intel_framebuffer(crtc->fb)->obj;
28346 dev_priv = crtc->dev->dev_private;
28347 wait_event(dev_priv->pending_flip_queue,
28348- atomic_read(&obj->pending_flip) == 0);
28349+ atomic_read_unchecked(&obj->pending_flip) == 0);
28350 }
28351
28352 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
fe2de317 28353@@ -6676,7 +6676,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
8308f9c9
MT
28354
28355 atomic_clear_mask(1 << intel_crtc->plane,
28356 &obj->pending_flip.counter);
28357- if (atomic_read(&obj->pending_flip) == 0)
28358+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
28359 wake_up(&dev_priv->pending_flip_queue);
28360
28361 schedule_work(&work->work);
fe2de317 28362@@ -6965,7 +6965,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8308f9c9
MT
28363 /* Block clients from rendering to the new back buffer until
28364 * the flip occurs and the object is no longer visible.
28365 */
28366- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28367+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28368
15a11c5b
MT
28369 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28370 if (ret)
fe2de317 28371@@ -6979,7 +6979,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
15a11c5b
MT
28372 return 0;
28373
28374 cleanup_pending:
28375- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28376+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28377 cleanup_objs:
28378 drm_gem_object_unreference(&work->old_fb_obj->base);
28379 drm_gem_object_unreference(&obj->base);
fe2de317
MT
28380diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28381index 54558a0..2d97005 100644
28382--- a/drivers/gpu/drm/mga/mga_drv.h
28383+++ b/drivers/gpu/drm/mga/mga_drv.h
8308f9c9
MT
28384@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28385 u32 clear_cmd;
28386 u32 maccess;
28387
28388- atomic_t vbl_received; /**< Number of vblanks received. */
28389+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28390 wait_queue_head_t fence_queue;
28391- atomic_t last_fence_retired;
28392+ atomic_unchecked_t last_fence_retired;
28393 u32 next_fence_to_post;
28394
28395 unsigned int fb_cpp;
fe2de317
MT
28396diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28397index 2581202..f230a8d9 100644
28398--- a/drivers/gpu/drm/mga/mga_irq.c
28399+++ b/drivers/gpu/drm/mga/mga_irq.c
28400@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
8308f9c9
MT
28401 if (crtc != 0)
28402 return 0;
28403
28404- return atomic_read(&dev_priv->vbl_received);
28405+ return atomic_read_unchecked(&dev_priv->vbl_received);
28406 }
28407
28408
fe2de317 28409@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
28410 /* VBLANK interrupt */
28411 if (status & MGA_VLINEPEN) {
28412 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28413- atomic_inc(&dev_priv->vbl_received);
28414+ atomic_inc_unchecked(&dev_priv->vbl_received);
28415 drm_handle_vblank(dev, 0);
28416 handled = 1;
28417 }
fe2de317 28418@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
28419 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28420 MGA_WRITE(MGA_PRIMEND, prim_end);
28421
28422- atomic_inc(&dev_priv->last_fence_retired);
28423+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
28424 DRM_WAKEUP(&dev_priv->fence_queue);
28425 handled = 1;
28426 }
fe2de317 28427@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
8308f9c9
MT
28428 * using fences.
28429 */
28430 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28431- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28432+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28433 - *sequence) <= (1 << 23)));
28434
28435 *sequence = cur_fence;
fe2de317
MT
28436diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28437index b311fab..dc11d6a 100644
28438--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28439+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
6e9df6a3 28440@@ -201,7 +201,7 @@ struct methods {
15a11c5b
MT
28441 const char desc[8];
28442 void (*loadbios)(struct drm_device *, uint8_t *);
28443 const bool rw;
28444-};
28445+} __do_const;
28446
28447 static struct methods shadow_methods[] = {
28448 { "PRAMIN", load_vbios_pramin, true },
fe2de317 28449@@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
15a11c5b
MT
28450 struct bit_table {
28451 const char id;
28452 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28453-};
28454+} __no_const;
28455
28456 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28457
fe2de317
MT
28458diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28459index d7d51de..7c6a7f1 100644
28460--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28461+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
6e9df6a3 28462@@ -238,7 +238,7 @@ struct nouveau_channel {
8308f9c9
MT
28463 struct list_head pending;
28464 uint32_t sequence;
28465 uint32_t sequence_ack;
28466- atomic_t last_sequence_irq;
28467+ atomic_unchecked_t last_sequence_irq;
6e9df6a3 28468 struct nouveau_vma vma;
8308f9c9
MT
28469 } fence;
28470
6e9df6a3 28471@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
15a11c5b
MT
28472 u32 handle, u16 class);
28473 void (*set_tile_region)(struct drm_device *dev, int i);
28474 void (*tlb_flush)(struct drm_device *, int engine);
28475-};
28476+} __no_const;
28477
28478 struct nouveau_instmem_engine {
28479 void *priv;
6e9df6a3 28480@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
15a11c5b
MT
28481 struct nouveau_mc_engine {
28482 int (*init)(struct drm_device *dev);
28483 void (*takedown)(struct drm_device *dev);
28484-};
28485+} __no_const;
28486
28487 struct nouveau_timer_engine {
28488 int (*init)(struct drm_device *dev);
28489 void (*takedown)(struct drm_device *dev);
28490 uint64_t (*read)(struct drm_device *dev);
28491-};
28492+} __no_const;
28493
28494 struct nouveau_fb_engine {
28495 int num_tiles;
6e9df6a3 28496@@ -513,7 +513,7 @@ struct nouveau_vram_engine {
15a11c5b
MT
28497 void (*put)(struct drm_device *, struct nouveau_mem **);
28498
28499 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28500-};
28501+} __no_const;
66a7e928
MT
28502
28503 struct nouveau_engine {
28504 struct nouveau_instmem_engine instmem;
6e9df6a3 28505@@ -660,7 +660,7 @@ struct drm_nouveau_private {
8308f9c9
MT
28506 struct drm_global_reference mem_global_ref;
28507 struct ttm_bo_global_ref bo_global_ref;
28508 struct ttm_bo_device bdev;
28509- atomic_t validate_sequence;
28510+ atomic_unchecked_t validate_sequence;
28511 } ttm;
28512
28513 struct {
fe2de317
MT
28514diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28515index ae22dfa..4f09960 100644
28516--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28517+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28518@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
8308f9c9
MT
28519 if (USE_REFCNT(dev))
28520 sequence = nvchan_rd32(chan, 0x48);
28521 else
28522- sequence = atomic_read(&chan->fence.last_sequence_irq);
28523+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28524
28525 if (chan->fence.sequence_ack == sequence)
28526 goto out;
fe2de317 28527@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
883a9837
MT
28528 return ret;
28529 }
15a11c5b 28530
8308f9c9
MT
28531- atomic_set(&chan->fence.last_sequence_irq, 0);
28532+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
8308f9c9
MT
28533 return 0;
28534 }
66a7e928 28535
fe2de317
MT
28536diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28537index 5f0bc57..eb9fac8 100644
28538--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28539+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28540@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
8308f9c9
MT
28541 int trycnt = 0;
28542 int ret, i;
28543
28544- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28545+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28546 retry:
28547 if (++trycnt > 100000) {
28548 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
fe2de317
MT
28549diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28550index 10656e4..59bf2a4 100644
28551--- a/drivers/gpu/drm/nouveau/nouveau_state.c
28552+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28553@@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
efbe55a5
MT
28554 bool can_switch;
28555
28556 spin_lock(&dev->count_lock);
28557- can_switch = (dev->open_count == 0);
c52201e0 28558+ can_switch = (local_read(&dev->open_count) == 0);
efbe55a5
MT
28559 spin_unlock(&dev->count_lock);
28560 return can_switch;
28561 }
fe2de317
MT
28562diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28563index dbdea8e..cd6eeeb 100644
28564--- a/drivers/gpu/drm/nouveau/nv04_graph.c
28565+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
6e9df6a3 28566@@ -554,7 +554,7 @@ static int
8308f9c9
MT
28567 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28568 u32 class, u32 mthd, u32 data)
28569 {
28570- atomic_set(&chan->fence.last_sequence_irq, data);
28571+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28572 return 0;
28573 }
28574
fe2de317
MT
28575diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28576index 570e190..084a31a 100644
28577--- a/drivers/gpu/drm/r128/r128_cce.c
28578+++ b/drivers/gpu/drm/r128/r128_cce.c
28579@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
8308f9c9
MT
28580
28581 /* GH: Simple idle check.
28582 */
28583- atomic_set(&dev_priv->idle_count, 0);
28584+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28585
28586 /* We don't support anything other than bus-mastering ring mode,
28587 * but the ring can be in either AGP or PCI space for the ring
fe2de317
MT
28588diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28589index 930c71b..499aded 100644
28590--- a/drivers/gpu/drm/r128/r128_drv.h
28591+++ b/drivers/gpu/drm/r128/r128_drv.h
8308f9c9
MT
28592@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28593 int is_pci;
28594 unsigned long cce_buffers_offset;
28595
28596- atomic_t idle_count;
28597+ atomic_unchecked_t idle_count;
28598
28599 int page_flipping;
28600 int current_page;
28601 u32 crtc_offset;
28602 u32 crtc_offset_cntl;
28603
28604- atomic_t vbl_received;
28605+ atomic_unchecked_t vbl_received;
28606
28607 u32 color_fmt;
28608 unsigned int front_offset;
fe2de317
MT
28609diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28610index 429d5a0..7e899ed 100644
28611--- a/drivers/gpu/drm/r128/r128_irq.c
28612+++ b/drivers/gpu/drm/r128/r128_irq.c
28613@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
8308f9c9
MT
28614 if (crtc != 0)
28615 return 0;
28616
28617- return atomic_read(&dev_priv->vbl_received);
28618+ return atomic_read_unchecked(&dev_priv->vbl_received);
28619 }
28620
28621 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
fe2de317 28622@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
28623 /* VBLANK interrupt */
28624 if (status & R128_CRTC_VBLANK_INT) {
28625 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28626- atomic_inc(&dev_priv->vbl_received);
28627+ atomic_inc_unchecked(&dev_priv->vbl_received);
28628 drm_handle_vblank(dev, 0);
28629 return IRQ_HANDLED;
28630 }
fe2de317
MT
28631diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28632index a9e33ce..09edd4b 100644
28633--- a/drivers/gpu/drm/r128/r128_state.c
28634+++ b/drivers/gpu/drm/r128/r128_state.c
28635@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
8308f9c9
MT
28636
28637 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28638 {
28639- if (atomic_read(&dev_priv->idle_count) == 0)
28640+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28641 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28642 else
28643- atomic_set(&dev_priv->idle_count, 0);
28644+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28645 }
28646
28647 #endif
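
Note on the recurring atomic_t changes in the nouveau, r128 and radeon hunks above: under PaX REFCOUNT, ordinary atomic_t arithmetic is instrumented to catch reference-count overflows, so counters that are allowed to wrap (fence sequence numbers, vblank and idle counts, IRQ statistics) are moved to the parallel atomic_unchecked_t type, whose *_unchecked helpers carry no such instrumentation. A minimal userspace sketch of that idea follows; it is illustrative only and assumes C11 atomics, the real definitions live elsewhere in this patch.

/*
 * Userspace sketch (assumption: illustrative only, not the kernel definition).
 * PaX REFCOUNT traps on overflow of normal atomic_t operations; counters that
 * may legitimately wrap are switched to an "unchecked" type whose helpers are
 * left uninstrumented.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } atomic_unchecked_t;

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
	atomic_store(&v->counter, i);
}

static inline int atomic_read_unchecked(atomic_unchecked_t *v)
{
	return atomic_load(&v->counter);
}

static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
	return atomic_fetch_add(&v->counter, i) + i;
}

int main(void)
{
	atomic_unchecked_t seq;

	atomic_set_unchecked(&seq, 0);
	printf("next sequence: %d\n", atomic_add_return_unchecked(1, &seq));
	printf("current value: %d\n", atomic_read_unchecked(&seq));
	return 0;
}
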
fe2de317
MT
28648diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
28649index 14cc88a..cc7b3a5 100644
28650--- a/drivers/gpu/drm/radeon/atom.c
28651+++ b/drivers/gpu/drm/radeon/atom.c
28652@@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
66a7e928
MT
28653 char name[512];
28654 int i;
28655
28656+ pax_track_stack();
28657+
6e9df6a3
MT
28658 if (!ctx)
28659 return NULL;
66a7e928 28660
fe2de317
MT
28661diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28662index 5a82b6b..9e69c73 100644
28663--- a/drivers/gpu/drm/radeon/mkregtable.c
28664+++ b/drivers/gpu/drm/radeon/mkregtable.c
28665@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
ae4e228f
MT
28666 regex_t mask_rex;
28667 regmatch_t match[4];
28668 char buf[1024];
28669- size_t end;
28670+ long end;
28671 int len;
28672 int done = 0;
28673 int r;
28674 unsigned o;
28675 struct offset *offset;
28676 char last_reg_s[10];
28677- int last_reg;
28678+ unsigned long last_reg;
28679
28680 if (regcomp
28681 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
fe2de317
MT
28682diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28683index 184628c..30e1725 100644
28684--- a/drivers/gpu/drm/radeon/radeon.h
28685+++ b/drivers/gpu/drm/radeon/radeon.h
28686@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28687 */
28688 struct radeon_fence_driver {
28689 uint32_t scratch_reg;
28690- atomic_t seq;
28691+ atomic_unchecked_t seq;
28692 uint32_t last_seq;
28693 unsigned long last_jiffies;
28694 unsigned long last_timeout;
28695@@ -962,7 +962,7 @@ struct radeon_asic {
28696 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28697 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28698 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28699-};
28700+} __no_const;
28701
28702 /*
28703 * Asic structures
28704diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
28705index 285acc4..f4d909f 100644
28706--- a/drivers/gpu/drm/radeon/radeon_atombios.c
28707+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
28708@@ -569,6 +569,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
66a7e928
MT
28709 struct radeon_gpio_rec gpio;
28710 struct radeon_hpd hpd;
28711
28712+ pax_track_stack();
28713+
28714 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
28715 return false;
28716
fe2de317
MT
28717diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28718index b51e157..8f14fb9 100644
28719--- a/drivers/gpu/drm/radeon/radeon_device.c
28720+++ b/drivers/gpu/drm/radeon/radeon_device.c
28721@@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
df50ba0c 28722 bool can_switch;
58c5fc13 28723
df50ba0c
MT
28724 spin_lock(&dev->count_lock);
28725- can_switch = (dev->open_count == 0);
c52201e0 28726+ can_switch = (local_read(&dev->open_count) == 0);
df50ba0c
MT
28727 spin_unlock(&dev->count_lock);
28728 return can_switch;
28729 }
fe2de317
MT
28730diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
28731index 6adb3e5..b91553e2 100644
28732--- a/drivers/gpu/drm/radeon/radeon_display.c
28733+++ b/drivers/gpu/drm/radeon/radeon_display.c
28734@@ -925,6 +925,8 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
66a7e928
MT
28735 uint32_t post_div;
28736 u32 pll_out_min, pll_out_max;
28737
28738+ pax_track_stack();
28739+
28740 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
28741 freq = freq * 1000;
28742
fe2de317
MT
28743diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28744index a1b59ca..86f2d44 100644
28745--- a/drivers/gpu/drm/radeon/radeon_drv.h
28746+++ b/drivers/gpu/drm/radeon/radeon_drv.h
8308f9c9
MT
28747@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28748
28749 /* SW interrupt */
28750 wait_queue_head_t swi_queue;
28751- atomic_t swi_emitted;
28752+ atomic_unchecked_t swi_emitted;
28753 int vblank_crtc;
28754 uint32_t irq_enable_reg;
28755 uint32_t r500_disp_irq_reg;
fe2de317
MT
28756diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28757index 7fd4e3e..9748ab5 100644
28758--- a/drivers/gpu/drm/radeon/radeon_fence.c
28759+++ b/drivers/gpu/drm/radeon/radeon_fence.c
28760@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
8308f9c9
MT
28761 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28762 return 0;
28763 }
28764- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28765+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
15a11c5b 28766 if (!rdev->cp.ready)
8308f9c9
MT
28767 /* FIXME: cp is not running assume everythings is done right
28768 * away
fe2de317 28769@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
8308f9c9
MT
28770 return r;
28771 }
15a11c5b 28772 radeon_fence_write(rdev, 0);
8308f9c9
MT
28773- atomic_set(&rdev->fence_drv.seq, 0);
28774+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28775 INIT_LIST_HEAD(&rdev->fence_drv.created);
28776 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28777 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
fe2de317
MT
28778diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28779index 48b7cea..342236f 100644
28780--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28781+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28782@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
71d190be
MT
28783 request = compat_alloc_user_space(sizeof(*request));
28784 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28785 || __put_user(req32.param, &request->param)
28786- || __put_user((void __user *)(unsigned long)req32.value,
28787+ || __put_user((unsigned long)req32.value,
28788 &request->value))
28789 return -EFAULT;
28790
fe2de317
MT
28791diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28792index 465746b..cb2b055 100644
28793--- a/drivers/gpu/drm/radeon/radeon_irq.c
28794+++ b/drivers/gpu/drm/radeon/radeon_irq.c
28795@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
8308f9c9
MT
28796 unsigned int ret;
28797 RING_LOCALS;
28798
28799- atomic_inc(&dev_priv->swi_emitted);
28800- ret = atomic_read(&dev_priv->swi_emitted);
28801+ atomic_inc_unchecked(&dev_priv->swi_emitted);
28802+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28803
28804 BEGIN_RING(4);
28805 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
fe2de317 28806@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
8308f9c9
MT
28807 drm_radeon_private_t *dev_priv =
28808 (drm_radeon_private_t *) dev->dev_private;
28809
28810- atomic_set(&dev_priv->swi_emitted, 0);
28811+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28812 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28813
28814 dev->max_vblank_count = 0x001fffff;
fe2de317
MT
28815diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28816index 92e7ea7..147ffad 100644
28817--- a/drivers/gpu/drm/radeon/radeon_state.c
28818+++ b/drivers/gpu/drm/radeon/radeon_state.c
28819@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
ae4e228f
MT
28820 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28821 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28822
28823- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28824+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28825 sarea_priv->nbox * sizeof(depth_boxes[0])))
28826 return -EFAULT;
28827
fe2de317 28828@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
58c5fc13
MT
28829 {
28830 drm_radeon_private_t *dev_priv = dev->dev_private;
28831 drm_radeon_getparam_t *param = data;
28832- int value;
28833+ int value = 0;
28834
28835 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28836
fe2de317
MT
28837diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28838index 0b5468b..9c4b308 100644
28839--- a/drivers/gpu/drm/radeon/radeon_ttm.c
28840+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28841@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
15a11c5b
MT
28842 }
28843 if (unlikely(ttm_vm_ops == NULL)) {
28844 ttm_vm_ops = vma->vm_ops;
58c5fc13
MT
28845- radeon_ttm_vm_ops = *ttm_vm_ops;
28846- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
15a11c5b
MT
28847+ pax_open_kernel();
28848+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28849+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28850+ pax_close_kernel();
28851 }
57199397
MT
28852 vma->vm_ops = &radeon_ttm_vm_ops;
28853 return 0;
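
The radeon_ttm.c hunk above shows the second recurring transformation: the copy-and-patch of the TTM vm_operations_struct is wrapped in pax_open_kernel()/pax_close_kernel(), which, as used throughout this patch, briefly lift the write protection on otherwise read-only kernel data, and the fault member is written through a void ** cast, presumably because the structure is const-qualified by the patch's constification. A stripped-down userspace sketch of the clone-the-ops-table-and-override-one-entry idiom follows, with the pax helpers stubbed out; names and behaviour are modelled loosely rather than taken from the kernel.

/*
 * Userspace sketch of the "copy the generic ops table, override one handler"
 * idiom from the radeon_mmap() hunk.  pax_open_kernel()/pax_close_kernel()
 * are no-ops here; in the patched kernel they temporarily allow writes to
 * data kept read-only by constification.
 */
#include <stdio.h>
#include <string.h>

struct vm_ops {
	int (*fault)(void);
};

static int generic_fault(void) { puts("generic TTM fault handler"); return 0; }
static int radeon_fault(void)  { puts("radeon-specific fault handler"); return 0; }

static void pax_open_kernel(void)  { /* would make the ops table writable */ }
static void pax_close_kernel(void) { /* would restore write protection */ }

static struct vm_ops radeon_vm_ops;	/* normally const / read-only */

int main(void)
{
	const struct vm_ops ttm_vm_ops = { .fault = generic_fault };

	pax_open_kernel();
	memcpy(&radeon_vm_ops, &ttm_vm_ops, sizeof(radeon_vm_ops));
	radeon_vm_ops.fault = radeon_fault;	/* the patch needs a cast here because
						   the real structure is const */
	pax_close_kernel();

	return radeon_vm_ops.fault();
}
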
fe2de317
MT
28854diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28855index a9049ed..501f284 100644
28856--- a/drivers/gpu/drm/radeon/rs690.c
28857+++ b/drivers/gpu/drm/radeon/rs690.c
28858@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
71d190be
MT
28859 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28860 rdev->pm.sideport_bandwidth.full)
28861 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28862- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28863+ read_delay_latency.full = dfixed_const(800 * 1000);
28864 read_delay_latency.full = dfixed_div(read_delay_latency,
28865 rdev->pm.igp_sideport_mclk);
28866+ a.full = dfixed_const(370);
28867+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28868 } else {
28869 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28870 rdev->pm.k8_bandwidth.full)
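
The rs690.c change is an overflow fix rather than a hardening annotation: dfixed_const() stores its argument in a 32-bit 20.12 fixed-point value (assuming the usual drm fixed20_12 layout), and 370 * 800 * 1000 = 296,000,000 no longer fits once shifted left by 12 bits. The hunk therefore divides 800 * 1000 by the sideport memory clock first and only then multiplies by 370, so every intermediate value stays representable. A standalone demonstration of the truncation being avoided, with the helper approximated as a plain shift:

/*
 * Demonstration of the 20.12 fixed-point overflow the rs690 hunk avoids.
 * Assumption: dfixed_const(x) effectively stores (x << 12) in a 32-bit value.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_constant = (uint64_t)370 * 800 * 1000 << 12;  /* needs ~41 bits */
	uint64_t new_constant = (uint64_t)800 * 1000 << 12;        /* ~3.28e9, fits in u32 */

	printf("370*800*1000 << 12 = %llu (truncated to u32: %u)\n",
	       (unsigned long long)old_constant, (uint32_t)old_constant);
	printf("    800*1000 << 12 = %llu (fits in 32 bits: %s)\n",
	       (unsigned long long)new_constant,
	       new_constant <= UINT32_MAX ? "yes" : "no");
	return 0;
}
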
fe2de317
MT
28871diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28872index 727e93d..1565650 100644
28873--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28874+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28875@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
15a11c5b
MT
28876 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28877 struct shrink_control *sc)
8308f9c9
MT
28878 {
28879- static atomic_t start_pool = ATOMIC_INIT(0);
28880+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28881 unsigned i;
28882- unsigned pool_offset = atomic_add_return(1, &start_pool);
28883+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28884 struct ttm_page_pool *pool;
15a11c5b 28885 int shrink_pages = sc->nr_to_scan;
8308f9c9 28886
fe2de317
MT
28887diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28888index 9cf87d9..2000b7d 100644
28889--- a/drivers/gpu/drm/via/via_drv.h
28890+++ b/drivers/gpu/drm/via/via_drv.h
8308f9c9
MT
28891@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28892 typedef uint32_t maskarray_t[5];
28893
28894 typedef struct drm_via_irq {
28895- atomic_t irq_received;
28896+ atomic_unchecked_t irq_received;
28897 uint32_t pending_mask;
28898 uint32_t enable_mask;
28899 wait_queue_head_t irq_queue;
28900@@ -75,7 +75,7 @@ typedef struct drm_via_private {
28901 struct timeval last_vblank;
28902 int last_vblank_valid;
28903 unsigned usec_per_vblank;
28904- atomic_t vbl_received;
28905+ atomic_unchecked_t vbl_received;
28906 drm_via_state_t hc_state;
28907 char pci_buf[VIA_PCI_BUF_SIZE];
28908 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
fe2de317
MT
28909diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28910index d391f48..10c8ca3 100644
28911--- a/drivers/gpu/drm/via/via_irq.c
28912+++ b/drivers/gpu/drm/via/via_irq.c
28913@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
8308f9c9
MT
28914 if (crtc != 0)
28915 return 0;
28916
28917- return atomic_read(&dev_priv->vbl_received);
28918+ return atomic_read_unchecked(&dev_priv->vbl_received);
28919 }
28920
28921 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
fe2de317 28922@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
28923
28924 status = VIA_READ(VIA_REG_INTERRUPT);
28925 if (status & VIA_IRQ_VBLANK_PENDING) {
28926- atomic_inc(&dev_priv->vbl_received);
28927- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28928+ atomic_inc_unchecked(&dev_priv->vbl_received);
28929+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28930 do_gettimeofday(&cur_vblank);
28931 if (dev_priv->last_vblank_valid) {
28932 dev_priv->usec_per_vblank =
fe2de317 28933@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
28934 dev_priv->last_vblank = cur_vblank;
28935 dev_priv->last_vblank_valid = 1;
28936 }
28937- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28938+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28939 DRM_DEBUG("US per vblank is: %u\n",
28940 dev_priv->usec_per_vblank);
28941 }
fe2de317 28942@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
28943
28944 for (i = 0; i < dev_priv->num_irqs; ++i) {
28945 if (status & cur_irq->pending_mask) {
28946- atomic_inc(&cur_irq->irq_received);
28947+ atomic_inc_unchecked(&cur_irq->irq_received);
28948 DRM_WAKEUP(&cur_irq->irq_queue);
28949 handled = 1;
28950 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
fe2de317 28951@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
8308f9c9
MT
28952 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28953 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28954 masks[irq][4]));
28955- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28956+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28957 } else {
28958 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28959 (((cur_irq_sequence =
28960- atomic_read(&cur_irq->irq_received)) -
28961+ atomic_read_unchecked(&cur_irq->irq_received)) -
28962 *sequence) <= (1 << 23)));
28963 }
28964 *sequence = cur_irq_sequence;
fe2de317 28965@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
8308f9c9
MT
28966 }
28967
28968 for (i = 0; i < dev_priv->num_irqs; ++i) {
28969- atomic_set(&cur_irq->irq_received, 0);
28970+ atomic_set_unchecked(&cur_irq->irq_received, 0);
28971 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28972 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28973 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
fe2de317 28974@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
8308f9c9
MT
28975 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28976 case VIA_IRQ_RELATIVE:
28977 irqwait->request.sequence +=
28978- atomic_read(&cur_irq->irq_received);
28979+ atomic_read_unchecked(&cur_irq->irq_received);
28980 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28981 case VIA_IRQ_ABSOLUTE:
28982 break;
fe2de317
MT
28983diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28984index 10fc01f..b4e9822 100644
28985--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28986+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
8308f9c9
MT
28987@@ -240,7 +240,7 @@ struct vmw_private {
28988 * Fencing and IRQs.
28989 */
28990
28991- atomic_t fence_seq;
28992+ atomic_unchecked_t fence_seq;
28993 wait_queue_head_t fence_queue;
28994 wait_queue_head_t fifo_queue;
28995 atomic_t fence_queue_waiters;
fe2de317
MT
28996diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28997index 41b95ed..69ea504 100644
28998--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28999+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
29000@@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
6e9df6a3
MT
29001 struct drm_vmw_fence_rep fence_rep;
29002 struct drm_vmw_fence_rep __user *user_fence_rep;
29003 int ret;
29004- void *user_cmd;
29005+ void __user *user_cmd;
29006 void *cmd;
29007 uint32_t sequence;
29008 struct vmw_sw_context *sw_context = &dev_priv->ctx;
fe2de317
MT
29009diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
29010index 61eacc1..ee38ce8 100644
29011--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
29012+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
29013@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
8308f9c9
MT
29014 while (!vmw_lag_lt(queue, us)) {
29015 spin_lock(&queue->lock);
29016 if (list_empty(&queue->head))
29017- sequence = atomic_read(&dev_priv->fence_seq);
29018+ sequence = atomic_read_unchecked(&dev_priv->fence_seq);
29019 else {
29020 fence = list_first_entry(&queue->head,
29021 struct vmw_fence, head);
fe2de317
MT
29022diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29023index 635c0ff..2641bbb 100644
29024--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29025+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29026@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
8308f9c9
MT
29027 (unsigned int) min,
29028 (unsigned int) fifo->capabilities);
29029
29030- atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
29031+ atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
29032 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
29033 vmw_fence_queue_init(&fifo->fence_queue);
29034 return vmw_fifo_send_fence(dev_priv, &dummy);
fe2de317 29035@@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
6e9df6a3
MT
29036 if (reserveable)
29037 iowrite32(bytes, fifo_mem +
29038 SVGA_FIFO_RESERVED);
29039- return fifo_mem + (next_cmd >> 2);
29040+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29041 } else {
29042 need_bounce = true;
29043 }
fe2de317 29044@@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
8308f9c9
MT
29045
29046 fm = vmw_fifo_reserve(dev_priv, bytes);
29047 if (unlikely(fm == NULL)) {
29048- *sequence = atomic_read(&dev_priv->fence_seq);
29049+ *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
29050 ret = -ENOMEM;
29051 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
29052 false, 3*HZ);
fe2de317 29053@@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
8308f9c9
MT
29054 }
29055
29056 do {
29057- *sequence = atomic_add_return(1, &dev_priv->fence_seq);
29058+ *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
29059 } while (*sequence == 0);
29060
29061 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
fe2de317
MT
29062diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29063index e92298a..f68f2d6 100644
29064--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29065+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29066@@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
8308f9c9
MT
29067 * emitted. Then the fence is stale and signaled.
29068 */
29069
29070- ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
29071+ ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
29072 > VMW_FENCE_WRAP);
29073
29074 return ret;
fe2de317 29075@@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
8308f9c9
MT
29076
29077 if (fifo_idle)
29078 down_read(&fifo_state->rwsem);
29079- signal_seq = atomic_read(&dev_priv->fence_seq);
29080+ signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
29081 ret = 0;
29082
29083 for (;;) {
fe2de317
MT
29084diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
29085index c72f1c0..18376f1 100644
29086--- a/drivers/gpu/vga/vgaarb.c
29087+++ b/drivers/gpu/vga/vgaarb.c
29088@@ -993,14 +993,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
29089 uc = &priv->cards[i];
29090 }
29091
29092- if (!uc)
29093- return -EINVAL;
29094+ if (!uc) {
29095+ ret_val = -EINVAL;
29096+ goto done;
29097+ }
29098
29099- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
29100- return -EINVAL;
29101+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
29102+ ret_val = -EINVAL;
29103+ goto done;
29104+ }
29105
29106- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
29107- return -EINVAL;
29108+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
29109+ ret_val = -EINVAL;
29110+ goto done;
29111+ }
29112
29113 vga_put(pdev, io_state);
29114
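
In the vga_arb_write() hunk the three early "return -EINVAL" statements become jumps to the function's common done: label, so whatever cleanup follows that label still runs on these error paths; the label itself lies outside the quoted context, so the exact cleanup (such as freeing the temporary copy of the user buffer) is an assumption here. A minimal sketch of the single-exit idiom the hunk switches to, with illustrative names only:

/*
 * Minimal sketch of the single-exit cleanup idiom.  Names and the cleanup body
 * are illustrative; the real function's "done:" label is not shown in the hunk.
 */
#include <stdlib.h>
#include <string.h>

static long example_write(const char *buf, size_t count)
{
	long ret_val;
	char *kbuf = malloc(count + 1);

	if (kbuf == NULL)
		return -12;			/* -ENOMEM, nothing to clean up yet */

	memcpy(kbuf, buf, count);
	kbuf[count] = '\0';

	if (strncmp(kbuf, "lock ", 5) != 0) {
		ret_val = -22;			/* -EINVAL */
		goto done;			/* instead of returning and skipping the free */
	}

	ret_val = (long)count;
done:
	free(kbuf);
	return ret_val;
}

int main(void)
{
	return example_write("lock io", 7) > 0 ? 0 : 1;
}
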
29115diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29116index f26ae31..721fe1b 100644
29117--- a/drivers/hid/hid-core.c
29118+++ b/drivers/hid/hid-core.c
29119@@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device *hdev)
8308f9c9
MT
29120
29121 int hid_add_device(struct hid_device *hdev)
29122 {
29123- static atomic_t id = ATOMIC_INIT(0);
29124+ static atomic_unchecked_t id = ATOMIC_INIT(0);
29125 int ret;
29126
29127 if (WARN_ON(hdev->status & HID_STAT_ADDED))
fe2de317 29128@@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hdev)
8308f9c9
MT
29129 /* XXX hack, any other cleaner solution after the driver core
29130 * is converted to allow more than 20 bytes as the device name? */
29131 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29132- hdev->vendor, hdev->product, atomic_inc_return(&id));
29133+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29134
29135 hid_debug_register(hdev, dev_name(&hdev->dev));
29136 ret = device_add(&hdev->dev);
fe2de317
MT
29137diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29138index 7c1188b..5a64357 100644
29139--- a/drivers/hid/usbhid/hiddev.c
29140+++ b/drivers/hid/usbhid/hiddev.c
29141@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
16454cff 29142 break;
ae4e228f 29143
df50ba0c
MT
29144 case HIDIOCAPPLICATION:
29145- if (arg < 0 || arg >= hid->maxapplication)
29146+ if (arg >= hid->maxapplication)
16454cff 29147 break;
df50ba0c
MT
29148
29149 for (i = 0; i < hid->maxcollection; i++)
fe2de317
MT
29150diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29151index 66f6729..2d6de0a 100644
29152--- a/drivers/hwmon/acpi_power_meter.c
29153+++ b/drivers/hwmon/acpi_power_meter.c
29154@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
15a11c5b 29155 return res;
8308f9c9 29156
15a11c5b
MT
29157 temp /= 1000;
29158- if (temp < 0)
29159- return -EINVAL;
8308f9c9 29160
15a11c5b
MT
29161 mutex_lock(&resource->lock);
29162 resource->trip[attr->index - 7] = temp;
fe2de317
MT
29163diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29164index fe4104c..346febb 100644
29165--- a/drivers/hwmon/sht15.c
29166+++ b/drivers/hwmon/sht15.c
15a11c5b 29167@@ -166,7 +166,7 @@ struct sht15_data {
8308f9c9 29168 int supply_uV;
15a11c5b 29169 bool supply_uV_valid;
8308f9c9
MT
29170 struct work_struct update_supply_work;
29171- atomic_t interrupt_handled;
29172+ atomic_unchecked_t interrupt_handled;
29173 };
29174
29175 /**
fe2de317 29176@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
8308f9c9
MT
29177 return ret;
29178
29179 gpio_direction_input(data->pdata->gpio_data);
29180- atomic_set(&data->interrupt_handled, 0);
29181+ atomic_set_unchecked(&data->interrupt_handled, 0);
29182
29183 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29184 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29185 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
66a7e928 29186 /* Only relevant if the interrupt hasn't occurred. */
8308f9c9
MT
29187- if (!atomic_read(&data->interrupt_handled))
29188+ if (!atomic_read_unchecked(&data->interrupt_handled))
29189 schedule_work(&data->read_work);
29190 }
29191 ret = wait_event_timeout(data->wait_queue,
fe2de317 29192@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
15a11c5b 29193
8308f9c9
MT
29194 /* First disable the interrupt */
29195 disable_irq_nosync(irq);
29196- atomic_inc(&data->interrupt_handled);
29197+ atomic_inc_unchecked(&data->interrupt_handled);
29198 /* Then schedule a reading work struct */
15a11c5b 29199 if (data->state != SHT15_READING_NOTHING)
8308f9c9 29200 schedule_work(&data->read_work);
fe2de317 29201@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
15a11c5b
MT
29202 * If not, then start the interrupt again - care here as could
29203 * have gone low in meantime so verify it hasn't!
29204 */
8308f9c9
MT
29205- atomic_set(&data->interrupt_handled, 0);
29206+ atomic_set_unchecked(&data->interrupt_handled, 0);
29207 enable_irq(gpio_to_irq(data->pdata->gpio_data));
66a7e928 29208 /* If still not occurred or another handler has been scheduled */
8308f9c9
MT
29209 if (gpio_get_value(data->pdata->gpio_data)
29210- || atomic_read(&data->interrupt_handled))
29211+ || atomic_read_unchecked(&data->interrupt_handled))
29212 return;
29213 }
15a11c5b 29214
fe2de317
MT
29215diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29216index 378fcb5..5e91fa8 100644
29217--- a/drivers/i2c/busses/i2c-amd756-s4882.c
29218+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
15a11c5b
MT
29219@@ -43,7 +43,7 @@
29220 extern struct i2c_adapter amd756_smbus;
29221
29222 static struct i2c_adapter *s4882_adapter;
29223-static struct i2c_algorithm *s4882_algo;
29224+static i2c_algorithm_no_const *s4882_algo;
29225
29226 /* Wrapper access functions for multiplexed SMBus */
29227 static DEFINE_MUTEX(amd756_lock);
fe2de317
MT
29228diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29229index 29015eb..af2d8e9 100644
29230--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29231+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
15a11c5b
MT
29232@@ -41,7 +41,7 @@
29233 extern struct i2c_adapter *nforce2_smbus;
66a7e928 29234
15a11c5b
MT
29235 static struct i2c_adapter *s4985_adapter;
29236-static struct i2c_algorithm *s4985_algo;
29237+static i2c_algorithm_no_const *s4985_algo;
66a7e928 29238
15a11c5b
MT
29239 /* Wrapper access functions for multiplexed SMBus */
29240 static DEFINE_MUTEX(nforce2_lock);
fe2de317
MT
29241diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29242index d7a4833..7fae376 100644
29243--- a/drivers/i2c/i2c-mux.c
29244+++ b/drivers/i2c/i2c-mux.c
15a11c5b
MT
29245@@ -28,7 +28,7 @@
29246 /* multiplexer per channel data */
29247 struct i2c_mux_priv {
29248 struct i2c_adapter adap;
29249- struct i2c_algorithm algo;
29250+ i2c_algorithm_no_const algo;
29251
29252 struct i2c_adapter *parent;
29253 void *mux_dev; /* the mux chip/device */
fe2de317
MT
29254diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29255index 57d00ca..0145194 100644
29256--- a/drivers/ide/aec62xx.c
29257+++ b/drivers/ide/aec62xx.c
29258@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
6e9df6a3
MT
29259 .cable_detect = atp86x_cable_detect,
29260 };
29261
29262-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29263+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29264 { /* 0: AEC6210 */
29265 .name = DRV_NAME,
29266 .init_chipset = init_chipset_aec62xx,
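
The long run of drivers/ide hunks that starts here all makes the same change: port-information tables that are already declared const are re-annotated from __devinitdata to __devinitconst, so they land in a read-only init section instead of a writable one, in line with the rest of the patch's constification of driver data. A small sketch of the difference, assuming the section names mirror the mainline definitions of this kernel generation:

/*
 * Sketch of why the annotation matters (assumption: section names taken from
 * the mainline init.h of this era).  A const table marked __devinitdata would
 * still be placed in writable init data; __devinitconst puts it in read-only
 * init data, matching its const qualifier.
 */
#include <stdio.h>

#define __devinitdata  __attribute__((__section__(".devinit.data")))
#define __devinitconst __attribute__((__section__(".devinit.rodata")))

struct ide_port_info {
	const char *name;
};

static const struct ide_port_info demo_chipset __devinitconst = {
	.name = "demo",
};

int main(void)
{
	printf("%s lives in .devinit.rodata\n", demo_chipset.name);
	return 0;
}
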
fe2de317
MT
29267diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29268index 2c8016a..911a27c 100644
29269--- a/drivers/ide/alim15x3.c
29270+++ b/drivers/ide/alim15x3.c
29271@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
6e9df6a3
MT
29272 .dma_sff_read_status = ide_dma_sff_read_status,
29273 };
29274
29275-static const struct ide_port_info ali15x3_chipset __devinitdata = {
29276+static const struct ide_port_info ali15x3_chipset __devinitconst = {
29277 .name = DRV_NAME,
29278 .init_chipset = init_chipset_ali15x3,
29279 .init_hwif = init_hwif_ali15x3,
fe2de317
MT
29280diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29281index 3747b25..56fc995 100644
29282--- a/drivers/ide/amd74xx.c
29283+++ b/drivers/ide/amd74xx.c
29284@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
6e9df6a3
MT
29285 .udma_mask = udma, \
29286 }
29287
29288-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29289+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29290 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29291 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29292 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
fe2de317
MT
29293diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29294index 15f0ead..cb43480 100644
29295--- a/drivers/ide/atiixp.c
29296+++ b/drivers/ide/atiixp.c
29297@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
6e9df6a3
MT
29298 .cable_detect = atiixp_cable_detect,
29299 };
29300
29301-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29302+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29303 { /* 0: IXP200/300/400/700 */
29304 .name = DRV_NAME,
29305 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
fe2de317
MT
29306diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29307index 5f80312..d1fc438 100644
29308--- a/drivers/ide/cmd64x.c
29309+++ b/drivers/ide/cmd64x.c
29310@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
6e9df6a3
MT
29311 .dma_sff_read_status = ide_dma_sff_read_status,
29312 };
29313
29314-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29315+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29316 { /* 0: CMD643 */
29317 .name = DRV_NAME,
29318 .init_chipset = init_chipset_cmd64x,
fe2de317
MT
29319diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29320index 2c1e5f7..1444762 100644
29321--- a/drivers/ide/cs5520.c
29322+++ b/drivers/ide/cs5520.c
29323@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
6e9df6a3
MT
29324 .set_dma_mode = cs5520_set_dma_mode,
29325 };
29326
29327-static const struct ide_port_info cyrix_chipset __devinitdata = {
29328+static const struct ide_port_info cyrix_chipset __devinitconst = {
29329 .name = DRV_NAME,
29330 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29331 .port_ops = &cs5520_port_ops,
fe2de317
MT
29332diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29333index 4dc4eb9..49b40ad 100644
29334--- a/drivers/ide/cs5530.c
29335+++ b/drivers/ide/cs5530.c
29336@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
6e9df6a3
MT
29337 .udma_filter = cs5530_udma_filter,
29338 };
29339
29340-static const struct ide_port_info cs5530_chipset __devinitdata = {
29341+static const struct ide_port_info cs5530_chipset __devinitconst = {
29342 .name = DRV_NAME,
29343 .init_chipset = init_chipset_cs5530,
29344 .init_hwif = init_hwif_cs5530,
fe2de317
MT
29345diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29346index 5059faf..18d4c85 100644
29347--- a/drivers/ide/cs5535.c
29348+++ b/drivers/ide/cs5535.c
29349@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
6e9df6a3
MT
29350 .cable_detect = cs5535_cable_detect,
29351 };
29352
29353-static const struct ide_port_info cs5535_chipset __devinitdata = {
29354+static const struct ide_port_info cs5535_chipset __devinitconst = {
29355 .name = DRV_NAME,
29356 .port_ops = &cs5535_port_ops,
29357 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
fe2de317
MT
29358diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29359index 67cbcfa..37ea151 100644
29360--- a/drivers/ide/cy82c693.c
29361+++ b/drivers/ide/cy82c693.c
29362@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
6e9df6a3
MT
29363 .set_dma_mode = cy82c693_set_dma_mode,
29364 };
29365
29366-static const struct ide_port_info cy82c693_chipset __devinitdata = {
29367+static const struct ide_port_info cy82c693_chipset __devinitconst = {
29368 .name = DRV_NAME,
29369 .init_iops = init_iops_cy82c693,
29370 .port_ops = &cy82c693_port_ops,
fe2de317
MT
29371diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29372index 58c51cd..4aec3b8 100644
29373--- a/drivers/ide/hpt366.c
29374+++ b/drivers/ide/hpt366.c
29375@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
6e9df6a3
MT
29376 }
29377 };
29378
29379-static const struct hpt_info hpt36x __devinitdata = {
29380+static const struct hpt_info hpt36x __devinitconst = {
29381 .chip_name = "HPT36x",
29382 .chip_type = HPT36x,
29383 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
fe2de317 29384@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
6e9df6a3
MT
29385 .timings = &hpt36x_timings
29386 };
29387
29388-static const struct hpt_info hpt370 __devinitdata = {
29389+static const struct hpt_info hpt370 __devinitconst = {
29390 .chip_name = "HPT370",
29391 .chip_type = HPT370,
29392 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
fe2de317 29393@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
6e9df6a3
MT
29394 .timings = &hpt37x_timings
29395 };
29396
29397-static const struct hpt_info hpt370a __devinitdata = {
29398+static const struct hpt_info hpt370a __devinitconst = {
29399 .chip_name = "HPT370A",
29400 .chip_type = HPT370A,
29401 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
fe2de317 29402@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
6e9df6a3
MT
29403 .timings = &hpt37x_timings
29404 };
29405
29406-static const struct hpt_info hpt374 __devinitdata = {
29407+static const struct hpt_info hpt374 __devinitconst = {
29408 .chip_name = "HPT374",
29409 .chip_type = HPT374,
29410 .udma_mask = ATA_UDMA5,
fe2de317 29411@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
6e9df6a3
MT
29412 .timings = &hpt37x_timings
29413 };
29414
29415-static const struct hpt_info hpt372 __devinitdata = {
29416+static const struct hpt_info hpt372 __devinitconst = {
29417 .chip_name = "HPT372",
29418 .chip_type = HPT372,
29419 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 29420@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
6e9df6a3
MT
29421 .timings = &hpt37x_timings
29422 };
29423
29424-static const struct hpt_info hpt372a __devinitdata = {
29425+static const struct hpt_info hpt372a __devinitconst = {
29426 .chip_name = "HPT372A",
29427 .chip_type = HPT372A,
29428 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 29429@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
6e9df6a3
MT
29430 .timings = &hpt37x_timings
29431 };
29432
29433-static const struct hpt_info hpt302 __devinitdata = {
29434+static const struct hpt_info hpt302 __devinitconst = {
29435 .chip_name = "HPT302",
29436 .chip_type = HPT302,
29437 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 29438@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
6e9df6a3
MT
29439 .timings = &hpt37x_timings
29440 };
29441
29442-static const struct hpt_info hpt371 __devinitdata = {
29443+static const struct hpt_info hpt371 __devinitconst = {
29444 .chip_name = "HPT371",
29445 .chip_type = HPT371,
29446 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 29447@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
6e9df6a3
MT
29448 .timings = &hpt37x_timings
29449 };
29450
29451-static const struct hpt_info hpt372n __devinitdata = {
29452+static const struct hpt_info hpt372n __devinitconst = {
29453 .chip_name = "HPT372N",
29454 .chip_type = HPT372N,
29455 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 29456@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
6e9df6a3
MT
29457 .timings = &hpt37x_timings
29458 };
29459
29460-static const struct hpt_info hpt302n __devinitdata = {
29461+static const struct hpt_info hpt302n __devinitconst = {
29462 .chip_name = "HPT302N",
29463 .chip_type = HPT302N,
29464 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 29465@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
6e9df6a3
MT
29466 .timings = &hpt37x_timings
29467 };
29468
29469-static const struct hpt_info hpt371n __devinitdata = {
29470+static const struct hpt_info hpt371n __devinitconst = {
29471 .chip_name = "HPT371N",
29472 .chip_type = HPT371N,
29473 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 29474@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
6e9df6a3
MT
29475 .dma_sff_read_status = ide_dma_sff_read_status,
29476 };
29477
29478-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29479+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29480 { /* 0: HPT36x */
29481 .name = DRV_NAME,
29482 .init_chipset = init_chipset_hpt366,
fe2de317
MT
29483diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29484index 04b0956..f5b47dc 100644
29485--- a/drivers/ide/ide-cd.c
29486+++ b/drivers/ide/ide-cd.c
29487@@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
ae4e228f
MT
29488 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29489 if ((unsigned long)buf & alignment
29490 || blk_rq_bytes(rq) & q->dma_pad_mask
29491- || object_is_on_stack(buf))
29492+ || object_starts_on_stack(buf))
29493 drive->dma = 0;
29494 }
29495 }
fe2de317
MT
29496diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
29497index 61fdf54..2834ea6 100644
29498--- a/drivers/ide/ide-floppy.c
29499+++ b/drivers/ide/ide-floppy.c
29500@@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
66a7e928
MT
29501 u8 pc_buf[256], header_len, desc_cnt;
29502 int i, rc = 1, blocks, length;
29503
29504+ pax_track_stack();
29505+
29506 ide_debug_log(IDE_DBG_FUNC, "enter");
29507
29508 drive->bios_cyl = 0;
fe2de317
MT
29509diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29510index a743e68..1cfd674 100644
29511--- a/drivers/ide/ide-pci-generic.c
29512+++ b/drivers/ide/ide-pci-generic.c
29513@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
6e9df6a3
MT
29514 .udma_mask = ATA_UDMA6, \
29515 }
29516
29517-static const struct ide_port_info generic_chipsets[] __devinitdata = {
29518+static const struct ide_port_info generic_chipsets[] __devinitconst = {
29519 /* 0: Unknown */
29520 DECLARE_GENERIC_PCI_DEV(0),
29521
fe2de317
MT
29522diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29523index 560e66d..d5dd180 100644
29524--- a/drivers/ide/it8172.c
29525+++ b/drivers/ide/it8172.c
29526@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
6e9df6a3
MT
29527 .set_dma_mode = it8172_set_dma_mode,
29528 };
29529
29530-static const struct ide_port_info it8172_port_info __devinitdata = {
29531+static const struct ide_port_info it8172_port_info __devinitconst = {
29532 .name = DRV_NAME,
29533 .port_ops = &it8172_port_ops,
29534 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
fe2de317
MT
29535diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29536index 46816ba..1847aeb 100644
29537--- a/drivers/ide/it8213.c
29538+++ b/drivers/ide/it8213.c
29539@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
6e9df6a3
MT
29540 .cable_detect = it8213_cable_detect,
29541 };
29542
29543-static const struct ide_port_info it8213_chipset __devinitdata = {
29544+static const struct ide_port_info it8213_chipset __devinitconst = {
29545 .name = DRV_NAME,
29546 .enablebits = { {0x41, 0x80, 0x80} },
29547 .port_ops = &it8213_port_ops,
fe2de317
MT
29548diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29549index 2e3169f..c5611db 100644
29550--- a/drivers/ide/it821x.c
29551+++ b/drivers/ide/it821x.c
29552@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
6e9df6a3
MT
29553 .cable_detect = it821x_cable_detect,
29554 };
29555
29556-static const struct ide_port_info it821x_chipset __devinitdata = {
29557+static const struct ide_port_info it821x_chipset __devinitconst = {
29558 .name = DRV_NAME,
29559 .init_chipset = init_chipset_it821x,
29560 .init_hwif = init_hwif_it821x,
fe2de317
MT
29561diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29562index 74c2c4a..efddd7d 100644
29563--- a/drivers/ide/jmicron.c
29564+++ b/drivers/ide/jmicron.c
29565@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
6e9df6a3
MT
29566 .cable_detect = jmicron_cable_detect,
29567 };
29568
29569-static const struct ide_port_info jmicron_chipset __devinitdata = {
29570+static const struct ide_port_info jmicron_chipset __devinitconst = {
29571 .name = DRV_NAME,
29572 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29573 .port_ops = &jmicron_port_ops,
fe2de317
MT
29574diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29575index 95327a2..73f78d8 100644
29576--- a/drivers/ide/ns87415.c
29577+++ b/drivers/ide/ns87415.c
29578@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
6e9df6a3
MT
29579 .dma_sff_read_status = superio_dma_sff_read_status,
29580 };
29581
29582-static const struct ide_port_info ns87415_chipset __devinitdata = {
29583+static const struct ide_port_info ns87415_chipset __devinitconst = {
29584 .name = DRV_NAME,
29585 .init_hwif = init_hwif_ns87415,
29586 .tp_ops = &ns87415_tp_ops,
fe2de317
MT
29587diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29588index 1a53a4c..39edc66 100644
29589--- a/drivers/ide/opti621.c
29590+++ b/drivers/ide/opti621.c
29591@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
6e9df6a3
MT
29592 .set_pio_mode = opti621_set_pio_mode,
29593 };
29594
29595-static const struct ide_port_info opti621_chipset __devinitdata = {
29596+static const struct ide_port_info opti621_chipset __devinitconst = {
29597 .name = DRV_NAME,
29598 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29599 .port_ops = &opti621_port_ops,
fe2de317
MT
29600diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29601index 9546fe2..2e5ceb6 100644
29602--- a/drivers/ide/pdc202xx_new.c
29603+++ b/drivers/ide/pdc202xx_new.c
29604@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
6e9df6a3
MT
29605 .udma_mask = udma, \
29606 }
29607
29608-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29609+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29610 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29611 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29612 };
fe2de317
MT
29613diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29614index 3a35ec6..5634510 100644
29615--- a/drivers/ide/pdc202xx_old.c
29616+++ b/drivers/ide/pdc202xx_old.c
29617@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
6e9df6a3
MT
29618 .max_sectors = sectors, \
29619 }
29620
29621-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29622+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29623 { /* 0: PDC20246 */
29624 .name = DRV_NAME,
29625 .init_chipset = init_chipset_pdc202xx,
fe2de317
MT
29626diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29627index b59d04c..368c2a7 100644
29628--- a/drivers/ide/piix.c
29629+++ b/drivers/ide/piix.c
29630@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
6e9df6a3
MT
29631 .udma_mask = udma, \
29632 }
29633
29634-static const struct ide_port_info piix_pci_info[] __devinitdata = {
29635+static const struct ide_port_info piix_pci_info[] __devinitconst = {
29636 /* 0: MPIIX */
29637 { /*
29638 * MPIIX actually has only a single IDE channel mapped to
fe2de317
MT
29639diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29640index a6414a8..c04173e 100644
29641--- a/drivers/ide/rz1000.c
29642+++ b/drivers/ide/rz1000.c
29643@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
6e9df6a3
MT
29644 }
29645 }
29646
29647-static const struct ide_port_info rz1000_chipset __devinitdata = {
29648+static const struct ide_port_info rz1000_chipset __devinitconst = {
29649 .name = DRV_NAME,
29650 .host_flags = IDE_HFLAG_NO_DMA,
29651 };
fe2de317
MT
29652diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29653index 356b9b5..d4758eb 100644
29654--- a/drivers/ide/sc1200.c
29655+++ b/drivers/ide/sc1200.c
29656@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
6e9df6a3
MT
29657 .dma_sff_read_status = ide_dma_sff_read_status,
29658 };
29659
29660-static const struct ide_port_info sc1200_chipset __devinitdata = {
29661+static const struct ide_port_info sc1200_chipset __devinitconst = {
29662 .name = DRV_NAME,
29663 .port_ops = &sc1200_port_ops,
29664 .dma_ops = &sc1200_dma_ops,
fe2de317
MT
29665diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29666index b7f5b0c..9701038 100644
29667--- a/drivers/ide/scc_pata.c
29668+++ b/drivers/ide/scc_pata.c
29669@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
6e9df6a3
MT
29670 .dma_sff_read_status = scc_dma_sff_read_status,
29671 };
29672
29673-static const struct ide_port_info scc_chipset __devinitdata = {
29674+static const struct ide_port_info scc_chipset __devinitconst = {
29675 .name = "sccIDE",
29676 .init_iops = init_iops_scc,
29677 .init_dma = scc_init_dma,
fe2de317
MT
29678diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29679index 35fb8da..24d72ef 100644
29680--- a/drivers/ide/serverworks.c
29681+++ b/drivers/ide/serverworks.c
29682@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
6e9df6a3
MT
29683 .cable_detect = svwks_cable_detect,
29684 };
29685
29686-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29687+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29688 { /* 0: OSB4 */
29689 .name = DRV_NAME,
29690 .init_chipset = init_chipset_svwks,
fe2de317
MT
29691diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
29692index ab3db61..afed580 100644
29693--- a/drivers/ide/setup-pci.c
29694+++ b/drivers/ide/setup-pci.c
29695@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
66a7e928
MT
29696 int ret, i, n_ports = dev2 ? 4 : 2;
29697 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29698
29699+ pax_track_stack();
29700+
29701 for (i = 0; i < n_ports / 2; i++) {
29702 ret = ide_setup_pci_controller(pdev[i], d, !i);
29703 if (ret < 0)
fe2de317
MT
29704diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29705index ddeda44..46f7e30 100644
29706--- a/drivers/ide/siimage.c
29707+++ b/drivers/ide/siimage.c
29708@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
6e9df6a3
MT
29709 .udma_mask = ATA_UDMA6, \
29710 }
29711
29712-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29713+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29714 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29715 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29716 };
fe2de317
MT
29717diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29718index 4a00225..09e61b4 100644
29719--- a/drivers/ide/sis5513.c
29720+++ b/drivers/ide/sis5513.c
29721@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
6e9df6a3
MT
29722 .cable_detect = sis_cable_detect,
29723 };
29724
29725-static const struct ide_port_info sis5513_chipset __devinitdata = {
29726+static const struct ide_port_info sis5513_chipset __devinitconst = {
29727 .name = DRV_NAME,
29728 .init_chipset = init_chipset_sis5513,
29729 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
fe2de317
MT
29730diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29731index f21dc2a..d051cd2 100644
29732--- a/drivers/ide/sl82c105.c
29733+++ b/drivers/ide/sl82c105.c
29734@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
6e9df6a3
MT
29735 .dma_sff_read_status = ide_dma_sff_read_status,
29736 };
29737
29738-static const struct ide_port_info sl82c105_chipset __devinitdata = {
29739+static const struct ide_port_info sl82c105_chipset __devinitconst = {
29740 .name = DRV_NAME,
29741 .init_chipset = init_chipset_sl82c105,
29742 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
fe2de317
MT
29743diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29744index 864ffe0..863a5e9 100644
29745--- a/drivers/ide/slc90e66.c
29746+++ b/drivers/ide/slc90e66.c
29747@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
6e9df6a3
MT
29748 .cable_detect = slc90e66_cable_detect,
29749 };
29750
29751-static const struct ide_port_info slc90e66_chipset __devinitdata = {
29752+static const struct ide_port_info slc90e66_chipset __devinitconst = {
29753 .name = DRV_NAME,
29754 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29755 .port_ops = &slc90e66_port_ops,
fe2de317
MT
29756diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29757index e444d24..ba577de 100644
29758--- a/drivers/ide/tc86c001.c
29759+++ b/drivers/ide/tc86c001.c
29760@@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
6e9df6a3
MT
29761 .dma_sff_read_status = ide_dma_sff_read_status,
29762 };
29763
29764-static const struct ide_port_info tc86c001_chipset __devinitdata = {
29765+static const struct ide_port_info tc86c001_chipset __devinitconst = {
29766 .name = DRV_NAME,
29767 .init_hwif = init_hwif_tc86c001,
29768 .port_ops = &tc86c001_port_ops,
fe2de317
MT
29769diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29770index e53a1b7..d11aff7 100644
29771--- a/drivers/ide/triflex.c
29772+++ b/drivers/ide/triflex.c
29773@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
6e9df6a3
MT
29774 .set_dma_mode = triflex_set_mode,
29775 };
29776
29777-static const struct ide_port_info triflex_device __devinitdata = {
29778+static const struct ide_port_info triflex_device __devinitconst = {
29779 .name = DRV_NAME,
29780 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29781 .port_ops = &triflex_port_ops,
fe2de317
MT
29782diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29783index 4b42ca0..e494a98 100644
29784--- a/drivers/ide/trm290.c
29785+++ b/drivers/ide/trm290.c
29786@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
6e9df6a3
MT
29787 .dma_check = trm290_dma_check,
29788 };
29789
29790-static const struct ide_port_info trm290_chipset __devinitdata = {
29791+static const struct ide_port_info trm290_chipset __devinitconst = {
29792 .name = DRV_NAME,
29793 .init_hwif = init_hwif_trm290,
29794 .tp_ops = &trm290_tp_ops,
fe2de317
MT
29795diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29796index f46f49c..eb77678 100644
29797--- a/drivers/ide/via82cxxx.c
29798+++ b/drivers/ide/via82cxxx.c
29799@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
6e9df6a3
MT
29800 .cable_detect = via82cxxx_cable_detect,
29801 };
29802
29803-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29804+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29805 .name = DRV_NAME,
29806 .init_chipset = init_chipset_via82cxxx,
29807 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
fe2de317
MT
29808diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29809index fc0f2bd..ac2f8a5 100644
29810--- a/drivers/infiniband/core/cm.c
29811+++ b/drivers/infiniband/core/cm.c
29812@@ -113,7 +113,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
ae4e228f
MT
29813
29814 struct cm_counter_group {
29815 struct kobject obj;
29816- atomic_long_t counter[CM_ATTR_COUNT];
29817+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29818 };
29819
29820 struct cm_counter_attribute {
fe2de317 29821@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm_work *work,
ae4e228f
MT
29822 struct ib_mad_send_buf *msg = NULL;
29823 int ret;
29824
29825- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29826+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29827 counter[CM_REQ_COUNTER]);
29828
29829 /* Quick state check to discard duplicate REQs. */
fe2de317 29830@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
ae4e228f
MT
29831 if (!cm_id_priv)
29832 return;
58c5fc13 29833
ae4e228f
MT
29834- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29835+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29836 counter[CM_REP_COUNTER]);
29837 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29838 if (ret)
fe2de317 29839@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work *work)
ae4e228f
MT
29840 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29841 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29842 spin_unlock_irq(&cm_id_priv->lock);
29843- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29844+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29845 counter[CM_RTU_COUNTER]);
29846 goto out;
29847 }
fe2de317 29848@@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_work *work)
ae4e228f
MT
29849 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29850 dreq_msg->local_comm_id);
29851 if (!cm_id_priv) {
29852- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29853+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29854 counter[CM_DREQ_COUNTER]);
29855 cm_issue_drep(work->port, work->mad_recv_wc);
29856 return -EINVAL;
fe2de317 29857@@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_work *work)
ae4e228f
MT
29858 case IB_CM_MRA_REP_RCVD:
29859 break;
29860 case IB_CM_TIMEWAIT:
29861- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29862+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29863 counter[CM_DREQ_COUNTER]);
29864 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29865 goto unlock;
fe2de317 29866@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
ae4e228f
MT
29867 cm_free_msg(msg);
29868 goto deref;
29869 case IB_CM_DREQ_RCVD:
29870- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29871+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29872 counter[CM_DREQ_COUNTER]);
29873 goto unlock;
29874 default:
fe2de317 29875@@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work *work)
ae4e228f
MT
29876 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29877 cm_id_priv->msg, timeout)) {
29878 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29879- atomic_long_inc(&work->port->
29880+ atomic_long_inc_unchecked(&work->port->
29881 counter_group[CM_RECV_DUPLICATES].
29882 counter[CM_MRA_COUNTER]);
29883 goto out;
fe2de317 29884@@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work *work)
ae4e228f
MT
29885 break;
29886 case IB_CM_MRA_REQ_RCVD:
29887 case IB_CM_MRA_REP_RCVD:
29888- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29889+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29890 counter[CM_MRA_COUNTER]);
29891 /* fall through */
29892 default:
fe2de317 29893@@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work *work)
ae4e228f
MT
29894 case IB_CM_LAP_IDLE:
29895 break;
29896 case IB_CM_MRA_LAP_SENT:
29897- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29898+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29899 counter[CM_LAP_COUNTER]);
29900 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29901 goto unlock;
fe2de317 29902@@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work *work)
ae4e228f
MT
29903 cm_free_msg(msg);
29904 goto deref;
29905 case IB_CM_LAP_RCVD:
29906- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29907+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29908 counter[CM_LAP_COUNTER]);
29909 goto unlock;
29910 default:
fe2de317 29911@@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
ae4e228f
MT
29912 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29913 if (cur_cm_id_priv) {
29914 spin_unlock_irq(&cm.lock);
29915- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29916+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29917 counter[CM_SIDR_REQ_COUNTER]);
29918 goto out; /* Duplicate message. */
29919 }
fe2de317 29920@@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
ae4e228f
MT
29921 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29922 msg->retries = 1;
29923
29924- atomic_long_add(1 + msg->retries,
29925+ atomic_long_add_unchecked(1 + msg->retries,
29926 &port->counter_group[CM_XMIT].counter[attr_index]);
29927 if (msg->retries)
29928- atomic_long_add(msg->retries,
29929+ atomic_long_add_unchecked(msg->retries,
29930 &port->counter_group[CM_XMIT_RETRIES].
29931 counter[attr_index]);
29932
fe2de317 29933@@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
ae4e228f
MT
29934 }
29935
29936 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29937- atomic_long_inc(&port->counter_group[CM_RECV].
29938+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29939 counter[attr_id - CM_ATTR_ID_OFFSET]);
29940
29941 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
fe2de317 29942@@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
ae4e228f
MT
29943 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29944
29945 return sprintf(buf, "%ld\n",
29946- atomic_long_read(&group->counter[cm_attr->index]));
29947+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29948 }
29949
df50ba0c 29950 static const struct sysfs_ops cm_counter_ops = {
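Note: the ib_cm conversions above (and many of the hunks that follow) switch pure statistics counters from atomic_long_t/atomic_t to the patch's *_unchecked_t variants, which under PAX_REFCOUNT opt those counters out of the overflow detection applied to ordinary atomics, since wrap-around of a statistic is harmless. A minimal user-space sketch of that split; the *_sketch_t names are hypothetical stand-ins, not the kernel's definitions:

#include <stdio.h>

typedef struct { long counter; } atomic_long_sketch_t;             /* would carry overflow checks     */
typedef struct { long counter; } atomic_long_unchecked_sketch_t;   /* statistics only, never checked  */

static inline void atomic_long_inc_sketch(atomic_long_sketch_t *v)
{
    /* a hardened build would detect and trap an overflow of v->counter here */
    v->counter++;
}

static inline void atomic_long_inc_unchecked_sketch(atomic_long_unchecked_sketch_t *v)
{
    /* pure statistic: wrap-around is harmless, so no overflow detection */
    v->counter++;
}

int main(void)
{
    atomic_long_sketch_t refs = { 0 };               /* e.g. an object reference count   */
    atomic_long_unchecked_sketch_t dup_reqs = { 0 }; /* e.g. a CM_RECV_DUPLICATES stat   */

    atomic_long_inc_sketch(&refs);
    atomic_long_inc_unchecked_sketch(&dup_reqs);
    printf("refs=%ld, duplicate REQs seen=%ld\n", refs.counter, dup_reqs.counter);
    return 0;
}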
fe2de317
MT
29951diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29952index 4507043..14ad522 100644
29953--- a/drivers/infiniband/core/fmr_pool.c
29954+++ b/drivers/infiniband/core/fmr_pool.c
8308f9c9
MT
29955@@ -97,8 +97,8 @@ struct ib_fmr_pool {
29956
29957 struct task_struct *thread;
29958
29959- atomic_t req_ser;
29960- atomic_t flush_ser;
29961+ atomic_unchecked_t req_ser;
29962+ atomic_unchecked_t flush_ser;
29963
29964 wait_queue_head_t force_wait;
29965 };
fe2de317 29966@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
8308f9c9
MT
29967 struct ib_fmr_pool *pool = pool_ptr;
29968
29969 do {
29970- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29971+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29972 ib_fmr_batch_release(pool);
29973
29974- atomic_inc(&pool->flush_ser);
29975+ atomic_inc_unchecked(&pool->flush_ser);
29976 wake_up_interruptible(&pool->force_wait);
29977
29978 if (pool->flush_function)
fe2de317 29979@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
8308f9c9
MT
29980 }
29981
29982 set_current_state(TASK_INTERRUPTIBLE);
29983- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29984+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29985 !kthread_should_stop())
29986 schedule();
29987 __set_current_state(TASK_RUNNING);
fe2de317 29988@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
8308f9c9
MT
29989 pool->dirty_watermark = params->dirty_watermark;
29990 pool->dirty_len = 0;
29991 spin_lock_init(&pool->pool_lock);
29992- atomic_set(&pool->req_ser, 0);
29993- atomic_set(&pool->flush_ser, 0);
29994+ atomic_set_unchecked(&pool->req_ser, 0);
29995+ atomic_set_unchecked(&pool->flush_ser, 0);
29996 init_waitqueue_head(&pool->force_wait);
29997
29998 pool->thread = kthread_run(ib_fmr_cleanup_thread,
fe2de317 29999@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
8308f9c9
MT
30000 }
30001 spin_unlock_irq(&pool->pool_lock);
30002
30003- serial = atomic_inc_return(&pool->req_ser);
30004+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30005 wake_up_process(pool->thread);
30006
30007 if (wait_event_interruptible(pool->force_wait,
30008- atomic_read(&pool->flush_ser) - serial >= 0))
30009+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30010 return -EINTR;
30011
30012 return 0;
fe2de317 30013@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
8308f9c9
MT
30014 } else {
30015 list_add_tail(&fmr->list, &pool->dirty_list);
30016 if (++pool->dirty_len >= pool->dirty_watermark) {
30017- atomic_inc(&pool->req_ser);
30018+ atomic_inc_unchecked(&pool->req_ser);
30019 wake_up_process(pool->thread);
30020 }
30021 }
fe2de317
MT
30022diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30023index 40c8353..946b0e4 100644
30024--- a/drivers/infiniband/hw/cxgb4/mem.c
30025+++ b/drivers/infiniband/hw/cxgb4/mem.c
30026@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
8308f9c9
MT
30027 int err;
30028 struct fw_ri_tpte tpt;
30029 u32 stag_idx;
30030- static atomic_t key;
30031+ static atomic_unchecked_t key;
30032
30033 if (c4iw_fatal_error(rdev))
30034 return -EIO;
fe2de317 30035@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
8308f9c9
MT
30036 &rdev->resource.tpt_fifo_lock);
30037 if (!stag_idx)
30038 return -ENOMEM;
30039- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30040+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30041 }
30042 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30043 __func__, stag_state, type, pdid, stag_idx);
fe2de317
MT
30044diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
30045index 31ae1b1..2f5b038 100644
30046--- a/drivers/infiniband/hw/ipath/ipath_fs.c
30047+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
30048@@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
66a7e928
MT
30049 struct infinipath_counters counters;
30050 struct ipath_devdata *dd;
30051
30052+ pax_track_stack();
30053+
30054 dd = file->f_path.dentry->d_inode->i_private;
30055 dd->ipath_f_read_counters(dd, &counters);
30056
fe2de317
MT
30057diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30058index 79b3dbc..96e5fcc 100644
30059--- a/drivers/infiniband/hw/ipath/ipath_rc.c
30060+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30061@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
66a7e928
MT
30062 struct ib_atomic_eth *ateth;
30063 struct ipath_ack_entry *e;
30064 u64 vaddr;
30065- atomic64_t *maddr;
30066+ atomic64_unchecked_t *maddr;
30067 u64 sdata;
30068 u32 rkey;
30069 u8 next;
fe2de317 30070@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
66a7e928
MT
30071 IB_ACCESS_REMOTE_ATOMIC)))
30072 goto nack_acc_unlck;
30073 /* Perform atomic OP and save result. */
30074- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30075+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30076 sdata = be64_to_cpu(ateth->swap_data);
30077 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30078 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30079- (u64) atomic64_add_return(sdata, maddr) - sdata :
30080+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30081 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30082 be64_to_cpu(ateth->compare_data),
30083 sdata);
fe2de317
MT
30084diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30085index 1f95bba..9530f87 100644
30086--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30087+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30088@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
66a7e928
MT
30089 unsigned long flags;
30090 struct ib_wc wc;
30091 u64 sdata;
30092- atomic64_t *maddr;
30093+ atomic64_unchecked_t *maddr;
30094 enum ib_wc_status send_status;
30095
30096 /*
30097@@ -382,11 +382,11 @@ again:
30098 IB_ACCESS_REMOTE_ATOMIC)))
30099 goto acc_err;
30100 /* Perform atomic OP and save result. */
30101- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30102+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30103 sdata = wqe->wr.wr.atomic.compare_add;
30104 *(u64 *) sqp->s_sge.sge.vaddr =
30105 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30106- (u64) atomic64_add_return(sdata, maddr) - sdata :
30107+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30108 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30109 sdata, wqe->wr.wr.atomic.swap);
30110 goto send_comp;
fe2de317
MT
30111diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30112index 2d668c6..3312bb7 100644
30113--- a/drivers/infiniband/hw/nes/nes.c
30114+++ b/drivers/infiniband/hw/nes/nes.c
30115@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
8308f9c9
MT
30116 LIST_HEAD(nes_adapter_list);
30117 static LIST_HEAD(nes_dev_list);
30118
30119-atomic_t qps_destroyed;
30120+atomic_unchecked_t qps_destroyed;
30121
30122 static unsigned int ee_flsh_adapter;
30123 static unsigned int sysfs_nonidx_addr;
fe2de317 30124@@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
8308f9c9
MT
30125 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30126 struct nes_adapter *nesadapter = nesdev->nesadapter;
30127
30128- atomic_inc(&qps_destroyed);
30129+ atomic_inc_unchecked(&qps_destroyed);
30130
30131 /* Free the control structures */
30132
fe2de317
MT
30133diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30134index 6fe7987..68637b5 100644
30135--- a/drivers/infiniband/hw/nes/nes.h
30136+++ b/drivers/infiniband/hw/nes/nes.h
30137@@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
30138 extern unsigned int wqm_quanta;
30139 extern struct list_head nes_adapter_list;
30140
30141-extern atomic_t cm_connects;
30142-extern atomic_t cm_accepts;
30143-extern atomic_t cm_disconnects;
30144-extern atomic_t cm_closes;
30145-extern atomic_t cm_connecteds;
30146-extern atomic_t cm_connect_reqs;
30147-extern atomic_t cm_rejects;
30148-extern atomic_t mod_qp_timouts;
30149-extern atomic_t qps_created;
30150-extern atomic_t qps_destroyed;
30151-extern atomic_t sw_qps_destroyed;
30152+extern atomic_unchecked_t cm_connects;
30153+extern atomic_unchecked_t cm_accepts;
30154+extern atomic_unchecked_t cm_disconnects;
30155+extern atomic_unchecked_t cm_closes;
30156+extern atomic_unchecked_t cm_connecteds;
30157+extern atomic_unchecked_t cm_connect_reqs;
30158+extern atomic_unchecked_t cm_rejects;
30159+extern atomic_unchecked_t mod_qp_timouts;
30160+extern atomic_unchecked_t qps_created;
30161+extern atomic_unchecked_t qps_destroyed;
30162+extern atomic_unchecked_t sw_qps_destroyed;
30163 extern u32 mh_detected;
30164 extern u32 mh_pauses_sent;
30165 extern u32 cm_packets_sent;
30166@@ -194,14 +194,14 @@ extern u32 cm_packets_created;
30167 extern u32 cm_packets_received;
30168 extern u32 cm_packets_dropped;
30169 extern u32 cm_packets_retrans;
30170-extern atomic_t cm_listens_created;
30171-extern atomic_t cm_listens_destroyed;
30172+extern atomic_unchecked_t cm_listens_created;
30173+extern atomic_unchecked_t cm_listens_destroyed;
30174 extern u32 cm_backlog_drops;
30175-extern atomic_t cm_loopbacks;
30176-extern atomic_t cm_nodes_created;
30177-extern atomic_t cm_nodes_destroyed;
30178-extern atomic_t cm_accel_dropped_pkts;
30179-extern atomic_t cm_resets_recvd;
30180+extern atomic_unchecked_t cm_loopbacks;
30181+extern atomic_unchecked_t cm_nodes_created;
30182+extern atomic_unchecked_t cm_nodes_destroyed;
30183+extern atomic_unchecked_t cm_accel_dropped_pkts;
30184+extern atomic_unchecked_t cm_resets_recvd;
30185
30186 extern u32 int_mod_timer_init;
30187 extern u32 int_mod_cq_depth_256;
30188diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30189index a237547..28a9819 100644
30190--- a/drivers/infiniband/hw/nes/nes_cm.c
30191+++ b/drivers/infiniband/hw/nes/nes_cm.c
8308f9c9
MT
30192@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30193 u32 cm_packets_retrans;
30194 u32 cm_packets_created;
30195 u32 cm_packets_received;
30196-atomic_t cm_listens_created;
30197-atomic_t cm_listens_destroyed;
30198+atomic_unchecked_t cm_listens_created;
30199+atomic_unchecked_t cm_listens_destroyed;
30200 u32 cm_backlog_drops;
30201-atomic_t cm_loopbacks;
30202-atomic_t cm_nodes_created;
30203-atomic_t cm_nodes_destroyed;
30204-atomic_t cm_accel_dropped_pkts;
30205-atomic_t cm_resets_recvd;
30206+atomic_unchecked_t cm_loopbacks;
30207+atomic_unchecked_t cm_nodes_created;
30208+atomic_unchecked_t cm_nodes_destroyed;
30209+atomic_unchecked_t cm_accel_dropped_pkts;
30210+atomic_unchecked_t cm_resets_recvd;
30211
30212 static inline int mini_cm_accelerated(struct nes_cm_core *,
30213 struct nes_cm_node *);
30214@@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
30215
30216 static struct nes_cm_core *g_cm_core;
30217
30218-atomic_t cm_connects;
30219-atomic_t cm_accepts;
30220-atomic_t cm_disconnects;
30221-atomic_t cm_closes;
30222-atomic_t cm_connecteds;
30223-atomic_t cm_connect_reqs;
30224-atomic_t cm_rejects;
30225+atomic_unchecked_t cm_connects;
30226+atomic_unchecked_t cm_accepts;
30227+atomic_unchecked_t cm_disconnects;
30228+atomic_unchecked_t cm_closes;
30229+atomic_unchecked_t cm_connecteds;
30230+atomic_unchecked_t cm_connect_reqs;
30231+atomic_unchecked_t cm_rejects;
30232
30233
30234 /**
fe2de317 30235@@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
8308f9c9
MT
30236 kfree(listener);
30237 listener = NULL;
30238 ret = 0;
30239- atomic_inc(&cm_listens_destroyed);
30240+ atomic_inc_unchecked(&cm_listens_destroyed);
30241 } else {
30242 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30243 }
fe2de317 30244@@ -1242,7 +1242,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
8308f9c9
MT
30245 cm_node->rem_mac);
30246
30247 add_hte_node(cm_core, cm_node);
30248- atomic_inc(&cm_nodes_created);
30249+ atomic_inc_unchecked(&cm_nodes_created);
30250
30251 return cm_node;
30252 }
fe2de317 30253@@ -1300,7 +1300,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
8308f9c9
MT
30254 }
30255
30256 atomic_dec(&cm_core->node_cnt);
30257- atomic_inc(&cm_nodes_destroyed);
30258+ atomic_inc_unchecked(&cm_nodes_destroyed);
30259 nesqp = cm_node->nesqp;
30260 if (nesqp) {
30261 nesqp->cm_node = NULL;
fe2de317 30262@@ -1367,7 +1367,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
8308f9c9
MT
30263
30264 static void drop_packet(struct sk_buff *skb)
30265 {
30266- atomic_inc(&cm_accel_dropped_pkts);
30267+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30268 dev_kfree_skb_any(skb);
30269 }
30270
fe2de317 30271@@ -1430,7 +1430,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
8308f9c9
MT
30272 {
30273
30274 int reset = 0; /* whether to send reset in case of err.. */
30275- atomic_inc(&cm_resets_recvd);
30276+ atomic_inc_unchecked(&cm_resets_recvd);
30277 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30278 " refcnt=%d\n", cm_node, cm_node->state,
30279 atomic_read(&cm_node->ref_count));
fe2de317 30280@@ -2059,7 +2059,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
8308f9c9
MT
30281 rem_ref_cm_node(cm_node->cm_core, cm_node);
30282 return NULL;
30283 }
30284- atomic_inc(&cm_loopbacks);
30285+ atomic_inc_unchecked(&cm_loopbacks);
30286 loopbackremotenode->loopbackpartner = cm_node;
30287 loopbackremotenode->tcp_cntxt.rcv_wscale =
30288 NES_CM_DEFAULT_RCV_WND_SCALE;
fe2de317 30289@@ -2334,7 +2334,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
8308f9c9
MT
30290 add_ref_cm_node(cm_node);
30291 } else if (cm_node->state == NES_CM_STATE_TSA) {
30292 rem_ref_cm_node(cm_core, cm_node);
30293- atomic_inc(&cm_accel_dropped_pkts);
30294+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30295 dev_kfree_skb_any(skb);
30296 break;
30297 }
fe2de317 30298@@ -2640,7 +2640,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
8308f9c9
MT
30299
30300 if ((cm_id) && (cm_id->event_handler)) {
30301 if (issue_disconn) {
30302- atomic_inc(&cm_disconnects);
30303+ atomic_inc_unchecked(&cm_disconnects);
30304 cm_event.event = IW_CM_EVENT_DISCONNECT;
30305 cm_event.status = disconn_status;
30306 cm_event.local_addr = cm_id->local_addr;
fe2de317 30307@@ -2662,7 +2662,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
8308f9c9
MT
30308 }
30309
30310 if (issue_close) {
30311- atomic_inc(&cm_closes);
30312+ atomic_inc_unchecked(&cm_closes);
30313 nes_disconnect(nesqp, 1);
30314
30315 cm_id->provider_data = nesqp;
fe2de317 30316@@ -2793,7 +2793,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
8308f9c9
MT
30317
30318 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30319 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30320- atomic_inc(&cm_accepts);
30321+ atomic_inc_unchecked(&cm_accepts);
30322
30323 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30324 netdev_refcnt_read(nesvnic->netdev));
fe2de317 30325@@ -3003,7 +3003,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
8308f9c9
MT
30326
30327 struct nes_cm_core *cm_core;
30328
30329- atomic_inc(&cm_rejects);
30330+ atomic_inc_unchecked(&cm_rejects);
30331 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30332 loopback = cm_node->loopbackpartner;
30333 cm_core = cm_node->cm_core;
fe2de317 30334@@ -3069,7 +3069,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
8308f9c9
MT
30335 ntohl(cm_id->local_addr.sin_addr.s_addr),
30336 ntohs(cm_id->local_addr.sin_port));
30337
30338- atomic_inc(&cm_connects);
30339+ atomic_inc_unchecked(&cm_connects);
30340 nesqp->active_conn = 1;
30341
30342 /* cache the cm_id in the qp */
fe2de317 30343@@ -3175,7 +3175,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
8308f9c9
MT
30344 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30345 return err;
30346 }
30347- atomic_inc(&cm_listens_created);
30348+ atomic_inc_unchecked(&cm_listens_created);
30349 }
30350
30351 cm_id->add_ref(cm_id);
fe2de317 30352@@ -3280,7 +3280,7 @@ static void cm_event_connected(struct nes_cm_event *event)
8308f9c9
MT
30353 if (nesqp->destroyed) {
30354 return;
30355 }
30356- atomic_inc(&cm_connecteds);
30357+ atomic_inc_unchecked(&cm_connecteds);
30358 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30359 " local port 0x%04X. jiffies = %lu.\n",
30360 nesqp->hwqp.qp_id,
fe2de317 30361@@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event)
8308f9c9
MT
30362
30363 cm_id->add_ref(cm_id);
30364 ret = cm_id->event_handler(cm_id, &cm_event);
30365- atomic_inc(&cm_closes);
30366+ atomic_inc_unchecked(&cm_closes);
30367 cm_event.event = IW_CM_EVENT_CLOSE;
15a11c5b 30368 cm_event.status = 0;
8308f9c9 30369 cm_event.provider_data = cm_id->provider_data;
fe2de317 30370@@ -3531,7 +3531,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
8308f9c9
MT
30371 return;
30372 cm_id = cm_node->cm_id;
30373
30374- atomic_inc(&cm_connect_reqs);
30375+ atomic_inc_unchecked(&cm_connect_reqs);
30376 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30377 cm_node, cm_id, jiffies);
30378
fe2de317 30379@@ -3569,7 +3569,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
8308f9c9
MT
30380 return;
30381 cm_id = cm_node->cm_id;
30382
30383- atomic_inc(&cm_connect_reqs);
30384+ atomic_inc_unchecked(&cm_connect_reqs);
30385 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30386 cm_node, cm_id, jiffies);
30387
fe2de317
MT
30388diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30389index 9d7ffeb..a95dd7d 100644
30390--- a/drivers/infiniband/hw/nes/nes_nic.c
30391+++ b/drivers/infiniband/hw/nes/nes_nic.c
30392@@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
8308f9c9
MT
30393 target_stat_values[++index] = mh_detected;
30394 target_stat_values[++index] = mh_pauses_sent;
30395 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30396- target_stat_values[++index] = atomic_read(&cm_connects);
30397- target_stat_values[++index] = atomic_read(&cm_accepts);
30398- target_stat_values[++index] = atomic_read(&cm_disconnects);
30399- target_stat_values[++index] = atomic_read(&cm_connecteds);
30400- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30401- target_stat_values[++index] = atomic_read(&cm_rejects);
30402- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30403- target_stat_values[++index] = atomic_read(&qps_created);
30404- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30405- target_stat_values[++index] = atomic_read(&qps_destroyed);
30406- target_stat_values[++index] = atomic_read(&cm_closes);
30407+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30408+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30409+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30410+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30411+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30412+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30413+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30414+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30415+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30416+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30417+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30418 target_stat_values[++index] = cm_packets_sent;
30419 target_stat_values[++index] = cm_packets_bounced;
30420 target_stat_values[++index] = cm_packets_created;
30421 target_stat_values[++index] = cm_packets_received;
30422 target_stat_values[++index] = cm_packets_dropped;
30423 target_stat_values[++index] = cm_packets_retrans;
30424- target_stat_values[++index] = atomic_read(&cm_listens_created);
30425- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30426+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30427+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30428 target_stat_values[++index] = cm_backlog_drops;
30429- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30430- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30431- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30432- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30433- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30434+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30435+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30436+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30437+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30438+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30439 target_stat_values[++index] = nesadapter->free_4kpbl;
30440 target_stat_values[++index] = nesadapter->free_256pbl;
30441 target_stat_values[++index] = int_mod_timer_init;
fe2de317
MT
30442diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30443index 9f2f7d4..6d2fee2 100644
30444--- a/drivers/infiniband/hw/nes/nes_verbs.c
30445+++ b/drivers/infiniband/hw/nes/nes_verbs.c
8308f9c9
MT
30446@@ -46,9 +46,9 @@
30447
30448 #include <rdma/ib_umem.h>
30449
30450-atomic_t mod_qp_timouts;
30451-atomic_t qps_created;
30452-atomic_t sw_qps_destroyed;
30453+atomic_unchecked_t mod_qp_timouts;
30454+atomic_unchecked_t qps_created;
30455+atomic_unchecked_t sw_qps_destroyed;
30456
30457 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30458
fe2de317 30459@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
8308f9c9
MT
30460 if (init_attr->create_flags)
30461 return ERR_PTR(-EINVAL);
30462
30463- atomic_inc(&qps_created);
30464+ atomic_inc_unchecked(&qps_created);
30465 switch (init_attr->qp_type) {
30466 case IB_QPT_RC:
30467 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
fe2de317 30468@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
8308f9c9
MT
30469 struct iw_cm_event cm_event;
30470 int ret;
30471
30472- atomic_inc(&sw_qps_destroyed);
30473+ atomic_inc_unchecked(&sw_qps_destroyed);
30474 nesqp->destroyed = 1;
30475
30476 /* Blow away the connection if it exists. */
fe2de317
MT
30477diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30478index c9624ea..e025b66 100644
30479--- a/drivers/infiniband/hw/qib/qib.h
30480+++ b/drivers/infiniband/hw/qib/qib.h
6892158b 30481@@ -51,6 +51,7 @@
57199397
MT
30482 #include <linux/completion.h>
30483 #include <linux/kref.h>
30484 #include <linux/sched.h>
30485+#include <linux/slab.h>
30486
30487 #include "qib_common.h"
30488 #include "qib_verbs.h"
fe2de317
MT
30489diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30490index c351aa4..e6967c2 100644
30491--- a/drivers/input/gameport/gameport.c
30492+++ b/drivers/input/gameport/gameport.c
8308f9c9
MT
30493@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30494 */
30495 static void gameport_init_port(struct gameport *gameport)
30496 {
30497- static atomic_t gameport_no = ATOMIC_INIT(0);
30498+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30499
30500 __module_get(THIS_MODULE);
30501
30502 mutex_init(&gameport->drv_mutex);
30503 device_initialize(&gameport->dev);
30504 dev_set_name(&gameport->dev, "gameport%lu",
30505- (unsigned long)atomic_inc_return(&gameport_no) - 1);
30506+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30507 gameport->dev.bus = &gameport_bus;
30508 gameport->dev.release = gameport_release_port;
30509 if (gameport->parent)
fe2de317
MT
30510diff --git a/drivers/input/input.c b/drivers/input/input.c
30511index da38d97..2aa0b79 100644
30512--- a/drivers/input/input.c
30513+++ b/drivers/input/input.c
30514@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
8308f9c9
MT
30515 */
30516 int input_register_device(struct input_dev *dev)
30517 {
30518- static atomic_t input_no = ATOMIC_INIT(0);
30519+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30520 struct input_handler *handler;
30521 const char *path;
30522 int error;
fe2de317 30523@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
66a7e928 30524 dev->setkeycode = input_default_setkeycode;
8308f9c9
MT
30525
30526 dev_set_name(&dev->dev, "input%ld",
30527- (unsigned long) atomic_inc_return(&input_no) - 1);
30528+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30529
30530 error = device_add(&dev->dev);
30531 if (error)
fe2de317
MT
30532diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30533index b8d8611..15f8d2c 100644
30534--- a/drivers/input/joystick/sidewinder.c
30535+++ b/drivers/input/joystick/sidewinder.c
66a7e928
MT
30536@@ -30,6 +30,7 @@
30537 #include <linux/kernel.h>
30538 #include <linux/module.h>
30539 #include <linux/slab.h>
30540+#include <linux/sched.h>
30541 #include <linux/init.h>
30542 #include <linux/input.h>
30543 #include <linux/gameport.h>
30544@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30545 unsigned char buf[SW_LENGTH];
30546 int i;
30547
30548+ pax_track_stack();
30549+
30550 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30551
30552 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
fe2de317
MT
30553diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30554index d728875..844c89b 100644
30555--- a/drivers/input/joystick/xpad.c
30556+++ b/drivers/input/joystick/xpad.c
30557@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
8308f9c9
MT
30558
30559 static int xpad_led_probe(struct usb_xpad *xpad)
30560 {
30561- static atomic_t led_seq = ATOMIC_INIT(0);
30562+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30563 long led_no;
30564 struct xpad_led *led;
30565 struct led_classdev *led_cdev;
fe2de317 30566@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
8308f9c9
MT
30567 if (!led)
30568 return -ENOMEM;
30569
30570- led_no = (long)atomic_inc_return(&led_seq) - 1;
30571+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30572
30573 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30574 led->xpad = xpad;
fe2de317
MT
30575diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30576index 0110b5a..d3ad144 100644
30577--- a/drivers/input/mousedev.c
30578+++ b/drivers/input/mousedev.c
30579@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
ae4e228f
MT
30580
30581 spin_unlock_irq(&client->packet_lock);
30582
30583- if (copy_to_user(buffer, data, count))
30584+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
30585 return -EFAULT;
30586
30587 return count;
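Note: the mousedev change above (and the similar b1.c and icn.c changes further down) guards a fixed-size on-stack buffer with an explicit length check before the user copy, so a caller-supplied count can never walk past the buffer. A self-contained sketch of the idiom, with a stub standing in for copy_to_user() and illustrative names throughout:

#include <stdio.h>
#include <string.h>

static unsigned long copy_to_user_stub(void *dst, const void *src, unsigned long n)
{
    memcpy(dst, src, n);    /* pretend the user copy always succeeds       */
    return 0;               /* 0 bytes left uncopied, as copy_to_user() reports */
}

static long read_packet(char *buffer, unsigned long count)
{
    char data[64];          /* fixed on-stack packet buffer, like the driver's */

    memset(data, 0xAB, sizeof(data));

    /* reject oversized requests before copying, so a large count can
       never read past the end of the on-stack buffer */
    if (count > sizeof(data) || copy_to_user_stub(buffer, data, count))
        return -1;          /* -EFAULT in the kernel */

    return (long)count;
}

int main(void)
{
    char out[128];

    printf("64 bytes -> %ld\n", read_packet(out, 64));   /* accepted */
    printf("65 bytes -> %ld\n", read_packet(out, 65));   /* rejected */
    return 0;
}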
fe2de317
MT
30588diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30589index ba70058..571d25d 100644
30590--- a/drivers/input/serio/serio.c
30591+++ b/drivers/input/serio/serio.c
30592@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
8308f9c9
MT
30593 */
30594 static void serio_init_port(struct serio *serio)
30595 {
30596- static atomic_t serio_no = ATOMIC_INIT(0);
30597+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30598
30599 __module_get(THIS_MODULE);
30600
fe2de317 30601@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
8308f9c9
MT
30602 mutex_init(&serio->drv_mutex);
30603 device_initialize(&serio->dev);
30604 dev_set_name(&serio->dev, "serio%ld",
30605- (long)atomic_inc_return(&serio_no) - 1);
30606+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30607 serio->dev.bus = &serio_bus;
30608 serio->dev.release = serio_release_port;
30609 serio->dev.groups = serio_device_attr_groups;
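Note: the gameport, input, xpad and serio hunks above all convert the same idiom: a static counter whose atomic_inc_return(&x) - 1 result hands each newly registered device a unique, zero-based number even under concurrent registration. Sketched below with C11 atomics standing in for the kernel's atomic type; the names are illustrative only:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long device_seq = 0;

/* returns a unique, zero-based device number, safe against concurrent callers */
static long next_device_no(void)
{
    return atomic_fetch_add(&device_seq, 1);    /* old value == inc_return(...) - 1 */
}

int main(void)
{
    char name[32];

    for (int i = 0; i < 3; i++) {
        snprintf(name, sizeof(name), "serio%ld", next_device_no());
        printf("registered %s\n", name);
    }
    return 0;
}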
fe2de317
MT
30610diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30611index e44933d..9ba484a 100644
30612--- a/drivers/isdn/capi/capi.c
30613+++ b/drivers/isdn/capi/capi.c
15a11c5b 30614@@ -83,8 +83,8 @@ struct capiminor {
8308f9c9
MT
30615
30616 struct capi20_appl *ap;
30617 u32 ncci;
30618- atomic_t datahandle;
30619- atomic_t msgid;
30620+ atomic_unchecked_t datahandle;
30621+ atomic_unchecked_t msgid;
30622
30623 struct tty_port port;
30624 int ttyinstop;
fe2de317 30625@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
8308f9c9
MT
30626 capimsg_setu16(s, 2, mp->ap->applid);
30627 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30628 capimsg_setu8 (s, 5, CAPI_RESP);
30629- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30630+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30631 capimsg_setu32(s, 8, mp->ncci);
30632 capimsg_setu16(s, 12, datahandle);
30633 }
fe2de317 30634@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
8308f9c9
MT
30635 mp->outbytes -= len;
30636 spin_unlock_bh(&mp->outlock);
30637
30638- datahandle = atomic_inc_return(&mp->datahandle);
30639+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30640 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30641 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30642 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30643 capimsg_setu16(skb->data, 2, mp->ap->applid);
30644 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30645 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30646- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30647+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30648 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30649 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30650 capimsg_setu16(skb->data, 16, len); /* Data length */
fe2de317
MT
30651diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30652index db621db..825ea1a 100644
30653--- a/drivers/isdn/gigaset/common.c
30654+++ b/drivers/isdn/gigaset/common.c
30655@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
58c5fc13
MT
30656 cs->commands_pending = 0;
30657 cs->cur_at_seq = 0;
30658 cs->gotfwver = -1;
30659- cs->open_count = 0;
c52201e0 30660+ local_set(&cs->open_count, 0);
58c5fc13
MT
30661 cs->dev = NULL;
30662 cs->tty = NULL;
30663 cs->tty_dev = NULL;
fe2de317
MT
30664diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30665index 212efaf..f187c6b 100644
30666--- a/drivers/isdn/gigaset/gigaset.h
30667+++ b/drivers/isdn/gigaset/gigaset.h
c52201e0
MT
30668@@ -35,6 +35,7 @@
30669 #include <linux/tty_driver.h>
30670 #include <linux/list.h>
6e9df6a3 30671 #include <linux/atomic.h>
c52201e0
MT
30672+#include <asm/local.h>
30673
30674 #define GIG_VERSION {0, 5, 0, 0}
30675 #define GIG_COMPAT {0, 4, 0, 0}
30676@@ -433,7 +434,7 @@ struct cardstate {
58c5fc13
MT
30677 spinlock_t cmdlock;
30678 unsigned curlen, cmdbytes;
30679
30680- unsigned open_count;
c52201e0 30681+ local_t open_count;
58c5fc13
MT
30682 struct tty_struct *tty;
30683 struct tasklet_struct if_wake_tasklet;
30684 unsigned control_state;
fe2de317
MT
30685diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30686index e35058b..5898a8b 100644
30687--- a/drivers/isdn/gigaset/interface.c
30688+++ b/drivers/isdn/gigaset/interface.c
30689@@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
15a11c5b 30690 }
58c5fc13
MT
30691 tty->driver_data = cs;
30692
30693- ++cs->open_count;
30694-
30695- if (cs->open_count == 1) {
c52201e0 30696+ if (local_inc_return(&cs->open_count) == 1) {
58c5fc13
MT
30697 spin_lock_irqsave(&cs->lock, flags);
30698 cs->tty = tty;
30699 spin_unlock_irqrestore(&cs->lock, flags);
fe2de317 30700@@ -192,10 +190,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
30701
30702 if (!cs->connected)
30703 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30704- else if (!cs->open_count)
c52201e0 30705+ else if (!local_read(&cs->open_count))
58c5fc13
MT
30706 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30707 else {
30708- if (!--cs->open_count) {
c52201e0 30709+ if (!local_dec_return(&cs->open_count)) {
58c5fc13
MT
30710 spin_lock_irqsave(&cs->lock, flags);
30711 cs->tty = NULL;
30712 spin_unlock_irqrestore(&cs->lock, flags);
fe2de317 30713@@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty,
58c5fc13
MT
30714 if (!cs->connected) {
30715 gig_dbg(DEBUG_IF, "not connected");
30716 retval = -ENODEV;
30717- } else if (!cs->open_count)
c52201e0 30718+ } else if (!local_read(&cs->open_count))
58c5fc13
MT
30719 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30720 else {
30721 retval = 0;
fe2de317 30722@@ -360,7 +358,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
58c5fc13 30723 retval = -ENODEV;
6892158b
MT
30724 goto done;
30725 }
30726- if (!cs->open_count) {
c52201e0 30727+ if (!local_read(&cs->open_count)) {
58c5fc13 30728 dev_warn(cs->dev, "%s: device not opened\n", __func__);
6892158b
MT
30729 retval = -ENODEV;
30730 goto done;
fe2de317 30731@@ -413,7 +411,7 @@ static int if_write_room(struct tty_struct *tty)
58c5fc13
MT
30732 if (!cs->connected) {
30733 gig_dbg(DEBUG_IF, "not connected");
30734 retval = -ENODEV;
30735- } else if (!cs->open_count)
c52201e0 30736+ } else if (!local_read(&cs->open_count))
58c5fc13
MT
30737 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30738 else if (cs->mstate != MS_LOCKED) {
30739 dev_warn(cs->dev, "can't write to unlocked device\n");
fe2de317 30740@@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
ae4e228f
MT
30741
30742 if (!cs->connected)
58c5fc13 30743 gig_dbg(DEBUG_IF, "not connected");
ae4e228f 30744- else if (!cs->open_count)
c52201e0 30745+ else if (!local_read(&cs->open_count))
58c5fc13 30746 dev_warn(cs->dev, "%s: device not opened\n", __func__);
ae4e228f 30747 else if (cs->mstate != MS_LOCKED)
58c5fc13 30748 dev_warn(cs->dev, "can't write to unlocked device\n");
fe2de317 30749@@ -471,7 +469,7 @@ static void if_throttle(struct tty_struct *tty)
58c5fc13
MT
30750
30751 if (!cs->connected)
30752 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30753- else if (!cs->open_count)
c52201e0 30754+ else if (!local_read(&cs->open_count))
58c5fc13 30755 dev_warn(cs->dev, "%s: device not opened\n", __func__);
ae4e228f 30756 else
df50ba0c 30757 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
fe2de317 30758@@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_struct *tty)
58c5fc13
MT
30759
30760 if (!cs->connected)
30761 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30762- else if (!cs->open_count)
c52201e0 30763+ else if (!local_read(&cs->open_count))
58c5fc13 30764 dev_warn(cs->dev, "%s: device not opened\n", __func__);
ae4e228f 30765 else
df50ba0c 30766 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
fe2de317 30767@@ -526,7 +524,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
58c5fc13
MT
30768 goto out;
30769 }
30770
30771- if (!cs->open_count) {
c52201e0 30772+ if (!local_read(&cs->open_count)) {
58c5fc13
MT
30773 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30774 goto out;
30775 }
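Note: the gigaset changes above replace the plain open_count integer with local_t and fold the increment/decrement and the == 1 / == 0 tests into single atomic operations, so concurrent open and close paths cannot race on the counter. A rough user-space model of that flow, using C11 atomics in place of the kernel's local_t; it is only a sketch of the pattern, not the driver's code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long open_count = 0;

static void device_open(void)
{
    /* single atomic step decides whether this open is the first one */
    if (atomic_fetch_add(&open_count, 1) + 1 == 1)
        printf("first open: attach tty\n");
    else
        printf("additional open (count now %ld)\n", (long)atomic_load(&open_count));
}

static void device_close(void)
{
    /* single atomic step decides whether this close is the last one */
    if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
        printf("last close: detach tty\n");
}

int main(void)
{
    device_open();
    device_open();
    device_close();
    device_close();
    return 0;
}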
fe2de317
MT
30776diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30777index 2a57da59..e7a12ed 100644
30778--- a/drivers/isdn/hardware/avm/b1.c
30779+++ b/drivers/isdn/hardware/avm/b1.c
30780@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
ae4e228f
MT
30781 }
30782 if (left) {
30783 if (t4file->user) {
30784- if (copy_from_user(buf, dp, left))
bc901d79 30785+ if (left > sizeof buf || copy_from_user(buf, dp, left))
ae4e228f
MT
30786 return -EFAULT;
30787 } else {
30788 memcpy(buf, dp, left);
fe2de317 30789@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
ae4e228f
MT
30790 }
30791 if (left) {
30792 if (config->user) {
30793- if (copy_from_user(buf, dp, left))
bc901d79 30794+ if (left > sizeof buf || copy_from_user(buf, dp, left))
ae4e228f
MT
30795 return -EFAULT;
30796 } else {
30797 memcpy(buf, dp, left);
fe2de317
MT
30798diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
30799index f130724..c373c68 100644
30800--- a/drivers/isdn/hardware/eicon/capidtmf.c
30801+++ b/drivers/isdn/hardware/eicon/capidtmf.c
30802@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
66a7e928
MT
30803 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30804 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30805
30806+ pax_track_stack();
30807
30808 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30809 {
fe2de317
MT
30810diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
30811index 4d425c6..a9be6c4 100644
30812--- a/drivers/isdn/hardware/eicon/capifunc.c
30813+++ b/drivers/isdn/hardware/eicon/capifunc.c
66a7e928
MT
30814@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30815 IDI_SYNC_REQ req;
30816 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30817
30818+ pax_track_stack();
30819+
30820 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30821
30822 for (x = 0; x < MAX_DESCRIPTORS; x++) {
fe2de317
MT
30823diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
30824index 3029234..ef0d9e2 100644
30825--- a/drivers/isdn/hardware/eicon/diddfunc.c
30826+++ b/drivers/isdn/hardware/eicon/diddfunc.c
30827@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
66a7e928
MT
30828 IDI_SYNC_REQ req;
30829 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30830
30831+ pax_track_stack();
30832+
30833 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30834
30835 for (x = 0; x < MAX_DESCRIPTORS; x++) {
fe2de317
MT
30836diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
30837index 0bbee78..a0d0a01 100644
30838--- a/drivers/isdn/hardware/eicon/divasfunc.c
30839+++ b/drivers/isdn/hardware/eicon/divasfunc.c
30840@@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
66a7e928
MT
30841 IDI_SYNC_REQ req;
30842 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30843
30844+ pax_track_stack();
30845+
30846 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30847
30848 for (x = 0; x < MAX_DESCRIPTORS; x++) {
fe2de317
MT
30849diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30850index 85784a7..a19ca98 100644
30851--- a/drivers/isdn/hardware/eicon/divasync.h
30852+++ b/drivers/isdn/hardware/eicon/divasync.h
15a11c5b
MT
30853@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30854 } diva_didd_add_adapter_t;
30855 typedef struct _diva_didd_remove_adapter {
30856 IDI_CALL p_request;
30857-} diva_didd_remove_adapter_t;
30858+} __no_const diva_didd_remove_adapter_t;
30859 typedef struct _diva_didd_read_adapter_array {
30860 void * buffer;
30861 dword length;
fe2de317
MT
30862diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
30863index db87d51..7d09acf 100644
30864--- a/drivers/isdn/hardware/eicon/idifunc.c
30865+++ b/drivers/isdn/hardware/eicon/idifunc.c
30866@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
66a7e928
MT
30867 IDI_SYNC_REQ req;
30868 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30869
30870+ pax_track_stack();
30871+
30872 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30873
30874 for (x = 0; x < MAX_DESCRIPTORS; x++) {
fe2de317
MT
30875diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
30876index a339598..b6a8bfc 100644
30877--- a/drivers/isdn/hardware/eicon/message.c
30878+++ b/drivers/isdn/hardware/eicon/message.c
15a11c5b 30879@@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
66a7e928
MT
30880 dword d;
30881 word w;
30882
30883+ pax_track_stack();
30884+
30885 a = plci->adapter;
30886 Id = ((word)plci->Id<<8)|a->Id;
30887 PUT_WORD(&SS_Ind[4],0x0000);
fe2de317 30888@@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
66a7e928
MT
30889 word j, n, w;
30890 dword d;
30891
30892+ pax_track_stack();
30893+
30894
30895 for(i=0;i<8;i++) bp_parms[i].length = 0;
30896 for(i=0;i<2;i++) global_config[i].length = 0;
fe2de317 30897@@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
66a7e928
MT
30898 const byte llc3[] = {4,3,2,2,6,6,0};
30899 const byte header[] = {0,2,3,3,0,0,0};
30900
30901+ pax_track_stack();
30902+
30903 for(i=0;i<8;i++) bp_parms[i].length = 0;
30904 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30905 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
fe2de317 30906@@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
66a7e928
MT
30907 word appl_number_group_type[MAX_APPL];
30908 PLCI *auxplci;
30909
30910+ pax_track_stack();
30911+
30912 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30913
30914 if(!a->group_optimization_enabled)
fe2de317
MT
30915diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
30916index a564b75..f3cf8b5 100644
30917--- a/drivers/isdn/hardware/eicon/mntfunc.c
30918+++ b/drivers/isdn/hardware/eicon/mntfunc.c
30919@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
66a7e928
MT
30920 IDI_SYNC_REQ req;
30921 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30922
30923+ pax_track_stack();
30924+
30925 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30926
30927 for (x = 0; x < MAX_DESCRIPTORS; x++) {
fe2de317
MT
30928diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30929index a3bd163..8956575 100644
30930--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30931+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
15a11c5b
MT
30932@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30933 typedef struct _diva_os_idi_adapter_interface {
30934 diva_init_card_proc_t cleanup_adapter_proc;
30935 diva_cmd_card_proc_t cmd_proc;
30936-} diva_os_idi_adapter_interface_t;
30937+} __no_const diva_os_idi_adapter_interface_t;
30938
30939 typedef struct _diva_os_xdi_adapter {
30940 struct list_head link;
fe2de317
MT
30941diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
30942index 6ed82ad..b05ac05 100644
30943--- a/drivers/isdn/i4l/isdn_common.c
30944+++ b/drivers/isdn/i4l/isdn_common.c
30945@@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
66a7e928
MT
30946 } iocpar;
30947 void __user *argp = (void __user *)arg;
30948
30949+ pax_track_stack();
30950+
30951 #define name iocpar.name
30952 #define bname iocpar.bname
30953 #define iocts iocpar.iocts
fe2de317
MT
30954diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30955index 1f355bb..43f1fea 100644
30956--- a/drivers/isdn/icn/icn.c
30957+++ b/drivers/isdn/icn/icn.c
30958@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
ae4e228f
MT
30959 if (count > len)
30960 count = len;
30961 if (user) {
30962- if (copy_from_user(msg, buf, count))
bc901d79 30963+ if (count > sizeof msg || copy_from_user(msg, buf, count))
ae4e228f
MT
30964 return -EFAULT;
30965 } else
30966 memcpy(msg, buf, count);
fe2de317
MT
30967diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30968index 2535933..09a8e86 100644
30969--- a/drivers/lguest/core.c
30970+++ b/drivers/lguest/core.c
df50ba0c 30971@@ -92,9 +92,17 @@ static __init int map_switcher(void)
58c5fc13
MT
30972 * it's worked so far. The end address needs +1 because __get_vm_area
30973 * allocates an extra guard page, so we need space for that.
30974 */
30975+
30976+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30977+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30978+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30979+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30980+#else
30981 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30982 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30983 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30984+#endif
30985+
30986 if (!switcher_vma) {
30987 err = -ENOMEM;
30988 printk("lguest: could not map switcher pages high\n");
bc901d79
MT
30989@@ -119,7 +127,7 @@ static __init int map_switcher(void)
30990 * Now the Switcher is mapped at the right address, we can't fail!
6e9df6a3 30991 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
bc901d79
MT
30992 */
30993- memcpy(switcher_vma->addr, start_switcher_text,
30994+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30995 end_switcher_text - start_switcher_text);
30996
30997 printk(KERN_INFO "lguest: mapped switcher at %p\n",
fe2de317
MT
30998diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30999index 65af42f..530c87a 100644
31000--- a/drivers/lguest/x86/core.c
31001+++ b/drivers/lguest/x86/core.c
bc901d79
MT
31002@@ -59,7 +59,7 @@ static struct {
31003 /* Offset from where switcher.S was compiled to where we've copied it */
31004 static unsigned long switcher_offset(void)
31005 {
31006- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31007+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31008 }
31009
31010 /* This cpu's struct lguest_pages. */
fe2de317 31011@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
bc901d79
MT
31012 * These copies are pretty cheap, so we do them unconditionally: */
31013 /* Save the current Host top-level page directory.
31014 */
31015+
31016+#ifdef CONFIG_PAX_PER_CPU_PGD
31017+ pages->state.host_cr3 = read_cr3();
31018+#else
31019 pages->state.host_cr3 = __pa(current->mm->pgd);
31020+#endif
31021+
31022 /*
31023 * Set up the Guest's page tables to see this CPU's pages (and no
31024 * other CPU's pages).
6e9df6a3 31025@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
bc901d79
MT
31026 * compiled-in switcher code and the high-mapped copy we just made.
31027 */
31028 for (i = 0; i < IDT_ENTRIES; i++)
31029- default_idt_entries[i] += switcher_offset();
31030+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31031
31032 /*
31033 * Set up the Switcher's per-cpu areas.
6e9df6a3 31034@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
bc901d79
MT
31035 * it will be undisturbed when we switch. To change %cs and jump we
31036 * need this structure to feed to Intel's "lcall" instruction.
31037 */
31038- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31039+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31040 lguest_entry.segment = LGUEST_CS;
31041
31042 /*
fe2de317
MT
31043diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
31044index 40634b0..4f5855e 100644
31045--- a/drivers/lguest/x86/switcher_32.S
31046+++ b/drivers/lguest/x86/switcher_32.S
bc901d79
MT
31047@@ -87,6 +87,7 @@
31048 #include <asm/page.h>
31049 #include <asm/segment.h>
31050 #include <asm/lguest.h>
31051+#include <asm/processor-flags.h>
31052
31053 // We mark the start of the code to copy
31054 // It's placed in .text tho it's never run here
31055@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31056 // Changes type when we load it: damn Intel!
31057 // For after we switch over our page tables
31058 // That entry will be read-only: we'd crash.
31059+
31060+#ifdef CONFIG_PAX_KERNEXEC
31061+ mov %cr0, %edx
31062+ xor $X86_CR0_WP, %edx
31063+ mov %edx, %cr0
31064+#endif
31065+
31066 movl $(GDT_ENTRY_TSS*8), %edx
31067 ltr %dx
31068
31069@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31070 // Let's clear it again for our return.
31071 // The GDT descriptor of the Host
31072 // Points to the table after two "size" bytes
31073- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31074+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31075 // Clear "used" from type field (byte 5, bit 2)
31076- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31077+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31078+
31079+#ifdef CONFIG_PAX_KERNEXEC
31080+ mov %cr0, %eax
31081+ xor $X86_CR0_WP, %eax
31082+ mov %eax, %cr0
31083+#endif
31084
31085 // Once our page table's switched, the Guest is live!
31086 // The Host fades as we run this final step.
31087@@ -295,13 +309,12 @@ deliver_to_host:
31088 // I consulted gcc, and it gave
31089 // These instructions, which I gladly credit:
31090 leal (%edx,%ebx,8), %eax
31091- movzwl (%eax),%edx
31092- movl 4(%eax), %eax
31093- xorw %ax, %ax
31094- orl %eax, %edx
31095+ movl 4(%eax), %edx
31096+ movw (%eax), %dx
31097 // Now the address of the handler's in %edx
31098 // We call it now: its "iret" drops us home.
31099- jmp *%edx
31100+ ljmp $__KERNEL_CS, $1f
31101+1: jmp *%edx
31102
31103 // Every interrupt can come to us here
31104 // But we must truly tell each apart.
fe2de317
MT
31105diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
31106index 4daf9e5..b8d1d0f 100644
31107--- a/drivers/macintosh/macio_asic.c
31108+++ b/drivers/macintosh/macio_asic.c
31109@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
6e9df6a3
MT
31110 * MacIO is matched against any Apple ID, it's probe() function
31111 * will then decide wether it applies or not
31112 */
31113-static const struct pci_device_id __devinitdata pci_ids [] = { {
31114+static const struct pci_device_id __devinitconst pci_ids [] = { {
31115 .vendor = PCI_VENDOR_ID_APPLE,
31116 .device = PCI_ANY_ID,
31117 .subvendor = PCI_ANY_ID,
fe2de317
MT
31118diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31119index 2e9a3ca..c2fb229 100644
31120--- a/drivers/md/dm-ioctl.c
31121+++ b/drivers/md/dm-ioctl.c
31122@@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
16454cff
MT
31123 cmd == DM_LIST_VERSIONS_CMD)
31124 return 0;
31125
31126- if ((cmd == DM_DEV_CREATE_CMD)) {
31127+ if (cmd == DM_DEV_CREATE_CMD) {
31128 if (!*param->name) {
31129 DMWARN("name not supplied when creating device");
31130 return -EINVAL;
fe2de317
MT
31131diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31132index 9bfd057..01180bc 100644
31133--- a/drivers/md/dm-raid1.c
31134+++ b/drivers/md/dm-raid1.c
15a11c5b 31135@@ -40,7 +40,7 @@ enum dm_raid1_error {
8308f9c9
MT
31136
31137 struct mirror {
31138 struct mirror_set *ms;
31139- atomic_t error_count;
31140+ atomic_unchecked_t error_count;
31141 unsigned long error_type;
31142 struct dm_dev *dev;
31143 sector_t offset;
fe2de317 31144@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
8308f9c9
MT
31145 struct mirror *m;
31146
31147 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31148- if (!atomic_read(&m->error_count))
31149+ if (!atomic_read_unchecked(&m->error_count))
31150 return m;
31151
31152 return NULL;
fe2de317 31153@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
8308f9c9
MT
31154 * simple way to tell if a device has encountered
31155 * errors.
31156 */
31157- atomic_inc(&m->error_count);
31158+ atomic_inc_unchecked(&m->error_count);
31159
31160 if (test_and_set_bit(error_type, &m->error_type))
31161 return;
fe2de317 31162@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
8308f9c9
MT
31163 struct mirror *m = get_default_mirror(ms);
31164
31165 do {
31166- if (likely(!atomic_read(&m->error_count)))
31167+ if (likely(!atomic_read_unchecked(&m->error_count)))
31168 return m;
31169
31170 if (m-- == ms->mirror)
15a11c5b 31171@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
8308f9c9
MT
31172 {
31173 struct mirror *default_mirror = get_default_mirror(m->ms);
31174
31175- return !atomic_read(&default_mirror->error_count);
31176+ return !atomic_read_unchecked(&default_mirror->error_count);
31177 }
31178
31179 static int mirror_available(struct mirror_set *ms, struct bio *bio)
fe2de317 31180@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
8308f9c9
MT
31181 */
31182 if (likely(region_in_sync(ms, region, 1)))
31183 m = choose_mirror(ms, bio->bi_sector);
31184- else if (m && atomic_read(&m->error_count))
31185+ else if (m && atomic_read_unchecked(&m->error_count))
31186 m = NULL;
31187
31188 if (likely(m))
fe2de317 31189@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
8308f9c9
MT
31190 }
31191
31192 ms->mirror[mirror].ms = ms;
31193- atomic_set(&(ms->mirror[mirror].error_count), 0);
31194+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31195 ms->mirror[mirror].error_type = 0;
31196 ms->mirror[mirror].offset = offset;
31197
fe2de317 31198@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
8308f9c9
MT
31199 */
31200 static char device_status_char(struct mirror *m)
31201 {
31202- if (!atomic_read(&(m->error_count)))
31203+ if (!atomic_read_unchecked(&(m->error_count)))
31204 return 'A';
31205
31206 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
fe2de317
MT
31207diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31208index 3d80cf0..b77cc47 100644
31209--- a/drivers/md/dm-stripe.c
31210+++ b/drivers/md/dm-stripe.c
8308f9c9
MT
31211@@ -20,7 +20,7 @@ struct stripe {
31212 struct dm_dev *dev;
31213 sector_t physical_start;
31214
31215- atomic_t error_count;
31216+ atomic_unchecked_t error_count;
31217 };
31218
31219 struct stripe_c {
fe2de317 31220@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
8308f9c9
MT
31221 kfree(sc);
31222 return r;
31223 }
31224- atomic_set(&(sc->stripe[i].error_count), 0);
31225+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31226 }
31227
31228 ti->private = sc;
fe2de317 31229@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
8308f9c9
MT
31230 DMEMIT("%d ", sc->stripes);
31231 for (i = 0; i < sc->stripes; i++) {
31232 DMEMIT("%s ", sc->stripe[i].dev->name);
31233- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31234+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31235 'D' : 'A';
31236 }
31237 buffer[i] = '\0';
fe2de317 31238@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
8308f9c9
MT
31239 */
31240 for (i = 0; i < sc->stripes; i++)
31241 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31242- atomic_inc(&(sc->stripe[i].error_count));
31243- if (atomic_read(&(sc->stripe[i].error_count)) <
31244+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31245+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31246 DM_IO_ERROR_THRESHOLD)
31247 schedule_work(&sc->trigger_event);
31248 }
fe2de317
MT
31249diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31250index bc04518..7a83b81 100644
31251--- a/drivers/md/dm-table.c
31252+++ b/drivers/md/dm-table.c
31253@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
58c5fc13
MT
31254 if (!dev_size)
31255 return 0;
31256
31257- if ((start >= dev_size) || (start + len > dev_size)) {
31258+ if ((start >= dev_size) || (len > dev_size - start)) {
31259 DMWARN("%s: %s too small for target: "
31260 "start=%llu, len=%llu, dev_size=%llu",
31261 dm_device_name(ti->table->md), bdevname(bdev, b),
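The dm-table change above replaces "start + len > dev_size" with "len > dev_size - start". With 64-bit sector counts a sufficiently large len can make start + len wrap around and slip past the old comparison; once start < dev_size is known, the subtraction on the right-hand side cannot wrap, so the rewritten check is overflow-safe. A small standalone sketch of the two forms (sector_t is typedef'd here only for the demo):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;   /* stand-in for the kernel type */

/* Old form: start + len can wrap around and defeat the check. */
static bool area_invalid_old(sector_t start, sector_t len, sector_t dev_size)
{
    return (start >= dev_size) || (start + len > dev_size);
}

/* New form: once start < dev_size is known, dev_size - start cannot wrap. */
static bool area_invalid_new(sector_t start, sector_t len, sector_t dev_size)
{
    return (start >= dev_size) || (len > dev_size - start);
}

int main(void)
{
    sector_t dev_size = 1000, start = 10, len = UINT64_MAX - 5; /* wraps in the old check */

    printf("old check flags it: %d\n", area_invalid_old(start, len, dev_size));
    printf("new check flags it: %d\n", area_invalid_new(start, len, dev_size));
    return 0;
}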
fe2de317
MT
31262diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31263index 52b39f3..83a8b6b 100644
31264--- a/drivers/md/dm.c
31265+++ b/drivers/md/dm.c
31266@@ -165,9 +165,9 @@ struct mapped_device {
31267 /*
31268 * Event handling.
31269 */
31270- atomic_t event_nr;
31271+ atomic_unchecked_t event_nr;
31272 wait_queue_head_t eventq;
31273- atomic_t uevent_seq;
31274+ atomic_unchecked_t uevent_seq;
31275 struct list_head uevent_list;
31276 spinlock_t uevent_lock; /* Protect access to uevent_list */
31277
31278@@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(int minor)
31279 rwlock_init(&md->map_lock);
31280 atomic_set(&md->holders, 1);
31281 atomic_set(&md->open_count, 0);
31282- atomic_set(&md->event_nr, 0);
31283- atomic_set(&md->uevent_seq, 0);
31284+ atomic_set_unchecked(&md->event_nr, 0);
31285+ atomic_set_unchecked(&md->uevent_seq, 0);
31286 INIT_LIST_HEAD(&md->uevent_list);
31287 spin_lock_init(&md->uevent_lock);
31288
31289@@ -1978,7 +1978,7 @@ static void event_callback(void *context)
31290
31291 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31292
31293- atomic_inc(&md->event_nr);
31294+ atomic_inc_unchecked(&md->event_nr);
31295 wake_up(&md->eventq);
31296 }
31297
31298@@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31299
31300 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31301 {
31302- return atomic_add_return(1, &md->uevent_seq);
31303+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31304 }
31305
31306 uint32_t dm_get_event_nr(struct mapped_device *md)
31307 {
31308- return atomic_read(&md->event_nr);
31309+ return atomic_read_unchecked(&md->event_nr);
31310 }
31311
31312 int dm_wait_event(struct mapped_device *md, int event_nr)
31313 {
31314 return wait_event_interruptible(md->eventq,
31315- (event_nr != atomic_read(&md->event_nr)));
31316+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31317 }
31318
31319 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31320diff --git a/drivers/md/md.c b/drivers/md/md.c
31321index 5c95ccb..217fa57 100644
31322--- a/drivers/md/md.c
31323+++ b/drivers/md/md.c
6e9df6a3 31324@@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
8308f9c9
MT
31325 * start build, activate spare
31326 */
31327 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31328-static atomic_t md_event_count;
31329+static atomic_unchecked_t md_event_count;
31330 void md_new_event(mddev_t *mddev)
31331 {
31332- atomic_inc(&md_event_count);
31333+ atomic_inc_unchecked(&md_event_count);
31334 wake_up(&md_event_waiters);
31335 }
31336 EXPORT_SYMBOL_GPL(md_new_event);
6e9df6a3 31337@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
8308f9c9
MT
31338 */
31339 static void md_new_event_inintr(mddev_t *mddev)
31340 {
31341- atomic_inc(&md_event_count);
31342+ atomic_inc_unchecked(&md_event_count);
31343 wake_up(&md_event_waiters);
31344 }
31345
fe2de317 31346@@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
8308f9c9
MT
31347
31348 rdev->preferred_minor = 0xffff;
31349 rdev->data_offset = le64_to_cpu(sb->data_offset);
31350- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31351+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31352
31353 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31354 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
fe2de317 31355@@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
8308f9c9
MT
31356 else
31357 sb->resync_offset = cpu_to_le64(0);
31358
31359- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31360+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31361
31362 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31363 sb->size = cpu_to_le64(mddev->dev_sectors);
fe2de317 31364@@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
8308f9c9
MT
31365 static ssize_t
31366 errors_show(mdk_rdev_t *rdev, char *page)
31367 {
31368- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31369+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31370 }
31371
31372 static ssize_t
fe2de317 31373@@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
8308f9c9
MT
31374 char *e;
31375 unsigned long n = simple_strtoul(buf, &e, 10);
31376 if (*buf && (*e == 0 || *e == '\n')) {
31377- atomic_set(&rdev->corrected_errors, n);
31378+ atomic_set_unchecked(&rdev->corrected_errors, n);
31379 return len;
31380 }
31381 return -EINVAL;
6e9df6a3
MT
31382@@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
31383 rdev->sb_loaded = 0;
31384 rdev->bb_page = NULL;
8308f9c9
MT
31385 atomic_set(&rdev->nr_pending, 0);
31386- atomic_set(&rdev->read_errors, 0);
31387- atomic_set(&rdev->corrected_errors, 0);
31388+ atomic_set_unchecked(&rdev->read_errors, 0);
31389+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31390
31391 INIT_LIST_HEAD(&rdev->same_set);
31392 init_waitqueue_head(&rdev->blocked_wait);
fe2de317 31393@@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
8308f9c9
MT
31394
31395 spin_unlock(&pers_lock);
31396 seq_printf(seq, "\n");
6e9df6a3
MT
31397- seq->poll_event = atomic_read(&md_event_count);
31398+ seq->poll_event = atomic_read_unchecked(&md_event_count);
8308f9c9
MT
31399 return 0;
31400 }
31401 if (v == (void*)2) {
fe2de317 31402@@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
58c5fc13
MT
31403 chunk_kb ? "KB" : "B");
31404 if (bitmap->file) {
31405 seq_printf(seq, ", file: ");
31406- seq_path(seq, &bitmap->file->f_path, " \t\n");
31407+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31408 }
31409
31410 seq_printf(seq, "\n");
fe2de317 31411@@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
6e9df6a3
MT
31412 return error;
31413
31414 seq = file->private_data;
31415- seq->poll_event = atomic_read(&md_event_count);
31416+ seq->poll_event = atomic_read_unchecked(&md_event_count);
8308f9c9
MT
31417 return error;
31418 }
6e9df6a3 31419
fe2de317 31420@@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
8308f9c9
MT
31421 /* always allow read */
31422 mask = POLLIN | POLLRDNORM;
31423
6e9df6a3
MT
31424- if (seq->poll_event != atomic_read(&md_event_count))
31425+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
8308f9c9
MT
31426 mask |= POLLERR | POLLPRI;
31427 return mask;
31428 }
fe2de317 31429@@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
58c5fc13
MT
31430 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31431 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31432 (int)part_stat_read(&disk->part0, sectors[1]) -
31433- atomic_read(&disk->sync_io);
31434+ atomic_read_unchecked(&disk->sync_io);
31435 /* sync IO will cause sync_io to increase before the disk_stats
31436 * as sync_io is counted when a request starts, and
31437 * disk_stats is counted when it completes.
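One hunk in md.c above also widens the escape set passed to seq_path() for the bitmap file path from " \t\n" to " \t\n\\", so a backslash embedded in the path is itself escaped and cannot mimic the octal escapes emitted for whitespace, keeping the one-line /proc/mdstat output unambiguous. A rough userspace analogue of that escaping; print_escaped here is a stand-in for the kernel's mangle_path(), not its real code:

#include <stdio.h>
#include <string.h>

/* Rough analogue of mangle_path(): characters found in `esc` are emitted as
 * \ooo octal so the single-line output stays parseable. */
static void print_escaped(const char *s, const char *esc)
{
    for (; *s; s++) {
        if (strchr(esc, *s))
            printf("\\%03o", (unsigned char)*s);
        else
            putchar(*s);
    }
    putchar('\n');
}

int main(void)
{
    /* A path containing a literal backslash: with the old escape set it is
     * printed verbatim and looks like an escaped space; with "\\" added,
     * the backslash itself becomes \134 and cannot be misread. */
    print_escaped("/tmp/evil\\040dir/bitmap", " \t\n");
    print_escaped("/tmp/evil\\040dir/bitmap", " \t\n\\");
    return 0;
}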
fe2de317
MT
31438diff --git a/drivers/md/md.h b/drivers/md/md.h
31439index 0a309dc..7e01d7f 100644
31440--- a/drivers/md/md.h
31441+++ b/drivers/md/md.h
6e9df6a3 31442@@ -124,13 +124,13 @@ struct mdk_rdev_s
8308f9c9
MT
31443 * only maintained for arrays that
31444 * support hot removal
31445 */
31446- atomic_t read_errors; /* number of consecutive read errors that
31447+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31448 * we have tried to ignore.
31449 */
31450 struct timespec last_read_error; /* monotonic time since our
31451 * last read error
31452 */
31453- atomic_t corrected_errors; /* number of corrected read errors,
31454+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31455 * for reporting to userspace and storing
31456 * in superblock.
31457 */
fe2de317 31458@@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
58c5fc13
MT
31459
31460 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31461 {
31462- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31463+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31464 }
31465
31466 struct mdk_personality
fe2de317
MT
31467diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31468index d9587df..83a0dc3 100644
31469--- a/drivers/md/raid1.c
31470+++ b/drivers/md/raid1.c
31471@@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
31472 if (r1_sync_page_io(rdev, sect, s,
31473 bio->bi_io_vec[idx].bv_page,
31474 READ) != 0)
31475- atomic_add(s, &rdev->corrected_errors);
31476+ atomic_add_unchecked(s, &rdev->corrected_errors);
31477 }
31478 sectors -= s;
31479 sect += s;
31480@@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
31481 test_bit(In_sync, &rdev->flags)) {
31482 if (r1_sync_page_io(rdev, sect, s,
31483 conf->tmppage, READ)) {
31484- atomic_add(s, &rdev->corrected_errors);
31485+ atomic_add_unchecked(s, &rdev->corrected_errors);
31486 printk(KERN_INFO
31487 "md/raid1:%s: read error corrected "
31488 "(%d sectors at %llu on %s)\n",
31489diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31490index 1d44228..98db57d 100644
31491--- a/drivers/md/raid10.c
31492+++ b/drivers/md/raid10.c
31493@@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bio, int error)
6e9df6a3
MT
31494 /* The write handler will notice the lack of
31495 * R10BIO_Uptodate and record any errors etc
31496 */
8308f9c9
MT
31497- atomic_add(r10_bio->sectors,
31498+ atomic_add_unchecked(r10_bio->sectors,
31499 &conf->mirrors[d].rdev->corrected_errors);
6e9df6a3
MT
31500
31501 /* for reconstruct, we always reschedule after a read.
fe2de317 31502@@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
8308f9c9
MT
31503 {
31504 struct timespec cur_time_mon;
31505 unsigned long hours_since_last;
31506- unsigned int read_errors = atomic_read(&rdev->read_errors);
31507+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31508
31509 ktime_get_ts(&cur_time_mon);
31510
fe2de317 31511@@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
8308f9c9
MT
31512 * overflowing the shift of read_errors by hours_since_last.
31513 */
31514 if (hours_since_last >= 8 * sizeof(read_errors))
31515- atomic_set(&rdev->read_errors, 0);
31516+ atomic_set_unchecked(&rdev->read_errors, 0);
31517 else
31518- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31519+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31520 }
31521
6e9df6a3 31522 static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
fe2de317 31523@@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
15a11c5b 31524 return;
8308f9c9 31525
15a11c5b
MT
31526 check_decay_read_errors(mddev, rdev);
31527- atomic_inc(&rdev->read_errors);
31528- if (atomic_read(&rdev->read_errors) > max_read_errors) {
31529+ atomic_inc_unchecked(&rdev->read_errors);
31530+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31531 char b[BDEVNAME_SIZE];
31532 bdevname(rdev->bdev, b);
31533
fe2de317 31534@@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
15a11c5b
MT
31535 "md/raid10:%s: %s: Raid device exceeded "
31536 "read_error threshold [cur %d:max %d]\n",
31537 mdname(mddev), b,
31538- atomic_read(&rdev->read_errors), max_read_errors);
31539+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31540 printk(KERN_NOTICE
31541 "md/raid10:%s: %s: Failing raid device\n",
31542 mdname(mddev), b);
fe2de317 31543@@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
6e9df6a3
MT
31544 (unsigned long long)(
31545 sect + rdev->data_offset),
31546 bdevname(rdev->bdev, b));
8308f9c9
MT
31547- atomic_add(s, &rdev->corrected_errors);
31548+ atomic_add_unchecked(s, &rdev->corrected_errors);
6e9df6a3
MT
31549 }
31550
31551 rdev_dec_pending(rdev, mddev);
fe2de317
MT
31552diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31553index b6200c3..02e8702 100644
31554--- a/drivers/md/raid5.c
31555+++ b/drivers/md/raid5.c
31556@@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
6e9df6a3
MT
31557 (unsigned long long)(sh->sector
31558 + rdev->data_offset),
31559 bdevname(rdev->bdev, b));
31560- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31561+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
8308f9c9
MT
31562 clear_bit(R5_ReadError, &sh->dev[i].flags);
31563 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31564 }
31565- if (atomic_read(&conf->disks[i].rdev->read_errors))
31566- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31567+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31568+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31569 } else {
31570 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31571 int retry = 0;
31572 rdev = conf->disks[i].rdev;
31573
31574 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31575- atomic_inc(&rdev->read_errors);
31576+ atomic_inc_unchecked(&rdev->read_errors);
31577 if (conf->mddev->degraded >= conf->max_degraded)
6e9df6a3
MT
31578 printk_ratelimited(
31579 KERN_WARNING
fe2de317 31580@@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
6e9df6a3
MT
31581 (unsigned long long)(sh->sector
31582 + rdev->data_offset),
31583 bdn);
8308f9c9
MT
31584- else if (atomic_read(&rdev->read_errors)
31585+ else if (atomic_read_unchecked(&rdev->read_errors)
31586 > conf->max_nr_stripes)
31587 printk(KERN_WARNING
31588 "md/raid:%s: Too many read errors, failing device %s.\n",
fe2de317 31589@@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
66a7e928
MT
31590 sector_t r_sector;
31591 struct stripe_head sh2;
31592
31593+ pax_track_stack();
31594
31595 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31596 stripe = new_sector;
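This is the first of many hunks in this section that insert pax_track_stack() at the top of functions with unusually large stack frames (the struct stripe_head copy above, and the 512-byte buffers in the media drivers below). As I understand it, the call feeds PaX's stack-depth tracking so the used portion of the kernel stack can be erased before returning to user space, preventing stale data from leaking through later uninitialized stack reads. A purely illustrative userspace sketch of the idea; track_stack(), lowest_stack and big_frame_function() are made-up names, not the real implementation:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: remember the deepest stack address observed, so the
 * whole used range could be wiped before "returning to user space". */
static uintptr_t lowest_stack = UINTPTR_MAX;

static void track_stack(void)
{
    volatile char marker;                 /* approximates the current stack pointer */
    if ((uintptr_t)&marker < lowest_stack)
        lowest_stack = (uintptr_t)&marker;
}

static void big_frame_function(void)
{
    volatile char buf[512];               /* large frame, like the 512-byte driver buffers */
    track_stack();                        /* mirrors the inserted pax_track_stack() call */
    buf[0] = 0;
    buf[511] = 0;
}

int main(void)
{
    track_stack();
    uintptr_t top = lowest_stack;
    big_frame_function();
    printf("deepest point is %lu bytes below the first measurement\n",
           (unsigned long)(top - lowest_stack));
    return 0;
}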
fe2de317
MT
31597diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
31598index 1d1d8d2..6c6837a 100644
31599--- a/drivers/media/common/saa7146_hlp.c
31600+++ b/drivers/media/common/saa7146_hlp.c
31601@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
66a7e928
MT
31602
31603 int x[32], y[32], w[32], h[32];
31604
31605+ pax_track_stack();
31606+
31607 /* clear out memory */
31608 memset(&line_list[0], 0x00, sizeof(u32)*32);
31609 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
fe2de317
MT
31610diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31611index 573d540..16f78f3 100644
31612--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31613+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
6e9df6a3
MT
31614@@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
31615 .subvendor = _subvend, .subdevice = _subdev, \
31616 .driver_data = (unsigned long)&_driverdata }
31617
31618-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31619+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31620 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31621 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31622 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
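The ddbridge hunk retags a const PCI device-ID table from __devinitdata to __devinitconst so the const object lands in the matching read-only init section; several later hunks (ngene, cx88-alsa, ab3100, sdhci-pci) make the same change. A userspace sketch of the underlying idea, with a made-up section name and macro standing in for the kernel annotations:

#include <stdio.h>

/* Sketch of __devinitdata vs __devinitconst: a const table should be placed
 * in a read-only section that matches its qualifier.  The section name and
 * macro below are invented for the demo. */
#define demo_initconst __attribute__((section(".demo.rodata")))

struct id_entry { unsigned int vendor, device; };

static const struct id_entry ids[] demo_initconst = {
    { 0x18c3, 0xabc3 },
    { 0x18c3, 0xabc4 },
    { 0, 0 }
};

int main(void)
{
    printf("%zu entries, first vendor 0x%x\n",
           sizeof(ids) / sizeof(ids[0]), ids[0].vendor);
    return 0;
}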
fe2de317
MT
31623diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31624index 7ea517b..252fe54 100644
31625--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31626+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31627@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
66a7e928
MT
31628 u8 buf[HOST_LINK_BUF_SIZE];
31629 int i;
31630
31631+ pax_track_stack();
31632+
31633 dprintk("%s\n", __func__);
31634
31635 /* check if we have space for a link buf in the rx_buffer */
fe2de317 31636@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
66a7e928
MT
31637 unsigned long timeout;
31638 int written;
31639
31640+ pax_track_stack();
31641+
31642 dprintk("%s\n", __func__);
31643
31644 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
fe2de317
MT
31645diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31646index a7d876f..8c21b61 100644
31647--- a/drivers/media/dvb/dvb-core/dvb_demux.h
31648+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
6e9df6a3 31649@@ -73,7 +73,7 @@ struct dvb_demux_feed {
15a11c5b
MT
31650 union {
31651 dmx_ts_cb ts;
31652 dmx_section_cb sec;
31653- } cb;
31654+ } __no_const cb;
31655
31656 struct dvb_demux *demux;
31657 void *priv;
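The __no_const annotation added here (and the *_no_const typedefs used elsewhere in the patch, such as file_operations_no_const just below) exempts a type from grsecurity's constify plugin, which otherwise makes structures consisting of function pointers read-only; this callback union must stay writable because the demux code assigns ts or sec at run time. A hedged userspace sketch of the trade-off, with illustrative names:

#include <stdio.h>

/* Illustrative: ops tables that never change can be fully const, which is
 * what constification enforces; tables filled in at run time (like the
 * ts/sec callback union in dvb_demux_feed) must stay writable, hence the
 * __no_const escape hatch in the patch. */
struct demux_cb {
    void (*ts)(const char *pkt);
    void (*sec)(const char *pkt);
};

static void ts_handler(const char *pkt)  { printf("TS packet: %s\n", pkt); }
static void sec_handler(const char *pkt) { printf("section:   %s\n", pkt); }

/* fixed table: safe to make const and place in read-only memory */
static const struct demux_cb fixed_cb = { .ts = ts_handler, .sec = sec_handler };

int main(void)
{
    struct demux_cb runtime_cb = { 0 };   /* filled in depending on feed type */

    runtime_cb.ts = ts_handler;           /* impossible if the type were const */
    fixed_cb.sec("PAT");
    runtime_cb.ts("0x47 ...");
    return 0;
}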
fe2de317
MT
31658diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31659index f732877..d38c35a 100644
31660--- a/drivers/media/dvb/dvb-core/dvbdev.c
31661+++ b/drivers/media/dvb/dvb-core/dvbdev.c
31662@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
ae4e228f
MT
31663 const struct dvb_device *template, void *priv, int type)
31664 {
31665 struct dvb_device *dvbdev;
16454cff 31666- struct file_operations *dvbdevfops;
15a11c5b 31667+ file_operations_no_const *dvbdevfops;
ae4e228f
MT
31668 struct device *clsdev;
31669 int minor;
16454cff 31670 int id;
fe2de317
MT
31671diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31672index acb5fb2..2413f1d 100644
31673--- a/drivers/media/dvb/dvb-usb/cxusb.c
31674+++ b/drivers/media/dvb/dvb-usb/cxusb.c
31675@@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
15a11c5b
MT
31676 struct dib0700_adapter_state {
31677 int (*set_param_save) (struct dvb_frontend *,
31678 struct dvb_frontend_parameters *);
31679-};
31680+} __no_const;
31681
31682 static int dib7070_set_param_override(struct dvb_frontend *fe,
31683 struct dvb_frontend_parameters *fep)
fe2de317
MT
31684diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
31685index a224e94..503b76a 100644
31686--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
31687+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
31688@@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
15a11c5b
MT
31689 if (!buf)
31690 return -ENOMEM;
66a7e928
MT
31691
31692+ pax_track_stack();
31693+
31694 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31695 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
31696 hx.addr, hx.len, hx.chk);
fe2de317
MT
31697diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31698index 058b231..183d2b3 100644
31699--- a/drivers/media/dvb/dvb-usb/dw2102.c
31700+++ b/drivers/media/dvb/dvb-usb/dw2102.c
15a11c5b
MT
31701@@ -95,7 +95,7 @@ struct su3000_state {
31702
31703 struct s6x0_state {
31704 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31705-};
31706+} __no_const;
31707
31708 /* debug */
31709 static int dvb_usb_dw2102_debug;
fe2de317
MT
31710diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
31711index 37b1469..28a6f6f 100644
31712--- a/drivers/media/dvb/dvb-usb/lmedm04.c
31713+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
31714@@ -742,6 +742,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
15a11c5b
MT
31715 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
31716 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
66a7e928
MT
31717
31718+ pax_track_stack();
31719
15a11c5b
MT
31720 data[0] = 0x8a;
31721 len_in = 1;
fe2de317 31722@@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_device *dev)
66a7e928
MT
31723 int ret = 0, len_in;
31724 u8 data[512] = {0};
31725
31726+ pax_track_stack();
31727+
31728 data[0] = 0x0a;
31729 len_in = 1;
31730 info("FRM Firmware Cold Reset");
fe2de317
MT
31731diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31732index ba91735..4261d84 100644
31733--- a/drivers/media/dvb/frontends/dib3000.h
31734+++ b/drivers/media/dvb/frontends/dib3000.h
6e9df6a3
MT
31735@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31736 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
15a11c5b
MT
31737 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31738 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
6e9df6a3
MT
31739-};
31740+} __no_const;
15a11c5b
MT
31741
31742 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31743 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
fe2de317
MT
31744diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c
31745index c283112..7f367a7 100644
31746--- a/drivers/media/dvb/frontends/mb86a16.c
31747+++ b/drivers/media/dvb/frontends/mb86a16.c
31748@@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16_state *state)
66a7e928
MT
31749 int ret = -1;
31750 int sync;
31751
31752+ pax_track_stack();
31753+
31754 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
31755
31756 fcp = 3000;
fe2de317
MT
31757diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
31758index c709ce6..b3fe620 100644
31759--- a/drivers/media/dvb/frontends/or51211.c
31760+++ b/drivers/media/dvb/frontends/or51211.c
31761@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
66a7e928
MT
31762 u8 tudata[585];
31763 int i;
31764
31765+ pax_track_stack();
31766+
31767 dprintk("Firmware is %zd bytes\n",fw->size);
31768
31769 /* Get eprom data */
fe2de317
MT
31770diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31771index 0564192..75b16f5 100644
31772--- a/drivers/media/dvb/ngene/ngene-cards.c
31773+++ b/drivers/media/dvb/ngene/ngene-cards.c
31774@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
6e9df6a3
MT
31775
31776 /****************************************************************************/
31777
31778-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31779+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31780 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31781 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31782 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
fe2de317
MT
31783diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31784index 16a089f..ab1667d 100644
31785--- a/drivers/media/radio/radio-cadet.c
31786+++ b/drivers/media/radio/radio-cadet.c
31787@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
6e9df6a3
MT
31788 unsigned char readbuf[RDS_BUFFER];
31789 int i = 0;
31790
31791+ if (count > RDS_BUFFER)
31792+ return -EFAULT;
31793 mutex_lock(&dev->lock);
31794 if (dev->rdsstat == 0) {
31795 dev->rdsstat = 1;
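cadet_read() stages RDS data through a fixed RDS_BUFFER-sized array on the stack, so without the added guard a read() larger than that buffer could run past its end. The hunk rejects oversized requests up front (returning -EFAULT here; clamping count to RDS_BUFFER would be the other common fix). A standalone sketch of the bound, with cadet_read_demo and rds_source as made-up stand-ins for the driver internals:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define RDS_BUFFER 256

/* Sketch of the added guard: refuse requests larger than the on-stack
 * staging buffer before filling it. */
static long cadet_read_demo(char *dst, size_t count, const char *rds_source)
{
    unsigned char readbuf[RDS_BUFFER];
    size_t i;

    if (count > RDS_BUFFER)          /* the check added by the patch */
        return -EFAULT;

    for (i = 0; i < count; i++)
        readbuf[i] = rds_source[i % 3];   /* stand-in for the RDS ring buffer */

    memcpy(dst, readbuf, count);
    return (long)count;
}

int main(void)
{
    char out[16];
    printf("small read -> %ld\n", cadet_read_demo(out, sizeof(out), "RDS"));
    printf("huge read  -> %ld\n", cadet_read_demo(out, 4096, "RDS"));
    return 0;
}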
fe2de317
MT
31796diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31797index 9cde353..8c6a1c3 100644
31798--- a/drivers/media/video/au0828/au0828.h
31799+++ b/drivers/media/video/au0828/au0828.h
6e9df6a3
MT
31800@@ -191,7 +191,7 @@ struct au0828_dev {
31801
31802 /* I2C */
31803 struct i2c_adapter i2c_adap;
31804- struct i2c_algorithm i2c_algo;
31805+ i2c_algorithm_no_const i2c_algo;
31806 struct i2c_client i2c_client;
31807 u32 i2c_rc;
31808
fe2de317
MT
31809diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
31810index 9e2f870..22e3a08 100644
31811--- a/drivers/media/video/cx18/cx18-driver.c
31812+++ b/drivers/media/video/cx18/cx18-driver.c
31813@@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
66a7e928
MT
31814 struct i2c_client c;
31815 u8 eedata[256];
31816
31817+ pax_track_stack();
31818+
31819 memset(&c, 0, sizeof(c));
31820 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
31821 c.adapter = &cx->i2c_adap[0];
fe2de317
MT
31822diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
31823index ce765e3..f9e1b04 100644
31824--- a/drivers/media/video/cx23885/cx23885-input.c
31825+++ b/drivers/media/video/cx23885/cx23885-input.c
31826@@ -53,6 +53,8 @@ static void cx23885_input_process_measurements(struct cx23885_dev *dev,
66a7e928
MT
31827 bool handle = false;
31828 struct ir_raw_event ir_core_event[64];
31829
31830+ pax_track_stack();
31831+
31832 do {
31833 num = 0;
31834 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
fe2de317
MT
31835diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31836index 68d1240..46b32eb 100644
31837--- a/drivers/media/video/cx88/cx88-alsa.c
31838+++ b/drivers/media/video/cx88/cx88-alsa.c
31839@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
6e9df6a3
MT
31840 * Only boards with eeprom and byte 1 at eeprom=1 have it
31841 */
31842
31843-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31844+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31845 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31846 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31847 {0, }
fe2de317
MT
31848diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31849index 9515f3a..c9ecb85 100644
31850--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31851+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31852@@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
66a7e928
MT
31853 u8 *eeprom;
31854 struct tveeprom tvdata;
31855
31856+ pax_track_stack();
31857+
31858 memset(&tvdata,0,sizeof(tvdata));
31859
31860 eeprom = pvr2_eeprom_fetch(hdw);
fe2de317
MT
31861diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31862index 305e6aa..0143317 100644
31863--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31864+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
6e9df6a3
MT
31865@@ -196,7 +196,7 @@ struct pvr2_hdw {
31866
31867 /* I2C stuff */
31868 struct i2c_adapter i2c_adap;
31869- struct i2c_algorithm i2c_algo;
31870+ i2c_algorithm_no_const i2c_algo;
31871 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31872 int i2c_cx25840_hack_state;
31873 int i2c_linked;
fe2de317
MT
31874diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
31875index f9f29cc..5a2e330 100644
31876--- a/drivers/media/video/saa7134/saa6752hs.c
31877+++ b/drivers/media/video/saa7134/saa6752hs.c
31878@@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
66a7e928
MT
31879 unsigned char localPAT[256];
31880 unsigned char localPMT[256];
31881
31882+ pax_track_stack();
31883+
31884 /* Set video format - must be done first as it resets other settings */
31885 set_reg8(client, 0x41, h->video_format);
31886
fe2de317
MT
31887diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
31888index 62fac7f..f29e0b9 100644
31889--- a/drivers/media/video/saa7164/saa7164-cmd.c
31890+++ b/drivers/media/video/saa7164/saa7164-cmd.c
31891@@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
66a7e928
MT
31892 u8 tmp[512];
31893 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31894
31895+ pax_track_stack();
31896+
31897 /* While any outstand message on the bus exists... */
31898 do {
31899
fe2de317 31900@@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
66a7e928
MT
31901 u8 tmp[512];
31902 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31903
31904+ pax_track_stack();
31905+
31906 while (loop) {
31907
31908 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
fe2de317
MT
31909diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31910index 84cd1b6..f741e07 100644
31911--- a/drivers/media/video/timblogiw.c
31912+++ b/drivers/media/video/timblogiw.c
31913@@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
15a11c5b
MT
31914
31915 /* Platform device functions */
31916
31917-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31918+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31919 .vidioc_querycap = timblogiw_querycap,
31920 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31921 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
fe2de317 31922@@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
6e9df6a3
MT
31923 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31924 };
31925
31926-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31927+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31928 .owner = THIS_MODULE,
31929 .open = timblogiw_open,
31930 .release = timblogiw_close,
fe2de317
MT
31931diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
31932index f344411..6ae9974 100644
31933--- a/drivers/media/video/usbvision/usbvision-core.c
31934+++ b/drivers/media/video/usbvision/usbvision-core.c
31935@@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
66a7e928
MT
31936 unsigned char rv, gv, bv;
31937 static unsigned char *Y, *U, *V;
31938
31939+ pax_track_stack();
31940+
31941 frame = usbvision->cur_frame;
31942 image_size = frame->frmwidth * frame->frmheight;
31943 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
fe2de317
MT
31944diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
31945index f300dea..04834ba 100644
31946--- a/drivers/media/video/videobuf-dma-sg.c
31947+++ b/drivers/media/video/videobuf-dma-sg.c
6e9df6a3 31948@@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
66a7e928
MT
31949 {
31950 struct videobuf_queue q;
31951
31952+ pax_track_stack();
31953+
31954 /* Required to make generic handler to call __videobuf_alloc */
31955 q.int_ops = &sg_ops;
31956
fe2de317
MT
31957diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31958index 7956a10..f39232f 100644
31959--- a/drivers/message/fusion/mptbase.c
31960+++ b/drivers/message/fusion/mptbase.c
31961@@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
6892158b
MT
31962 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31963 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
57199397
MT
31964
31965+#ifdef CONFIG_GRKERNSEC_HIDESYM
6892158b 31966+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
57199397 31967+#else
6892158b 31968 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
57199397
MT
31969 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31970+#endif
31971+
31972 /*
31973 * Rounding UP to nearest 4-kB boundary here...
31974 */
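Under CONFIG_GRKERNSEC_HIDESYM the mptbase /proc output prints NULL instead of the real request-frame addresses, since raw kernel pointers in world-readable files hand an attacker ready-made targets (mainline addresses the same concern with kptr_restrict and %pK). A small sketch of the pattern, with HIDESYM as a stand-in for the config option:

#include <stdio.h>

/* Toggle to mimic CONFIG_GRKERNSEC_HIDESYM */
#define HIDESYM 1

static void show_frames(const void *req_frames, unsigned long req_frames_dma)
{
#if HIDESYM
    (void)req_frames;
    (void)req_frames_dma;
    printf("RequestFrames @ %p (Dma @ %p)\n", (void *)NULL, (void *)NULL);
#else
    printf("RequestFrames @ %p (Dma @ %p)\n", req_frames, (void *)req_frames_dma);
#endif
}

int main(void)
{
    int dummy;
    show_frames(&dummy, 0xdeadbeefUL);
    return 0;
}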
fe2de317
MT
31975diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31976index 7596aec..f7ae9aa 100644
31977--- a/drivers/message/fusion/mptsas.c
31978+++ b/drivers/message/fusion/mptsas.c
31979@@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
df50ba0c
MT
31980 return 0;
31981 }
31982
31983+static inline void
31984+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31985+{
31986+ if (phy_info->port_details) {
31987+ phy_info->port_details->rphy = rphy;
31988+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31989+ ioc->name, rphy));
31990+ }
31991+
31992+ if (rphy) {
31993+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31994+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31995+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31996+ ioc->name, rphy, rphy->dev.release));
31997+ }
31998+}
31999+
32000 /* no mutex */
32001 static void
32002 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
fe2de317 32003@@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
df50ba0c
MT
32004 return NULL;
32005 }
32006
32007-static inline void
32008-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32009-{
32010- if (phy_info->port_details) {
32011- phy_info->port_details->rphy = rphy;
32012- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32013- ioc->name, rphy));
32014- }
32015-
32016- if (rphy) {
32017- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32018- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32019- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32020- ioc->name, rphy, rphy->dev.release));
32021- }
32022-}
32023-
32024 static inline struct sas_port *
32025 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32026 {
fe2de317
MT
32027diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
32028index ce61a57..3da8862 100644
32029--- a/drivers/message/fusion/mptscsih.c
32030+++ b/drivers/message/fusion/mptscsih.c
6892158b
MT
32031@@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32032
32033 h = shost_priv(SChost);
32034
32035- if (h) {
32036- if (h->info_kbuf == NULL)
32037- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32038- return h->info_kbuf;
32039- h->info_kbuf[0] = '\0';
32040+ if (!h)
32041+ return NULL;
32042
32043- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32044- h->info_kbuf[size-1] = '\0';
32045- }
32046+ if (h->info_kbuf == NULL)
32047+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32048+ return h->info_kbuf;
32049+ h->info_kbuf[0] = '\0';
32050+
32051+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32052+ h->info_kbuf[size-1] = '\0';
32053
32054 return h->info_kbuf;
32055 }
fe2de317
MT
32056diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
32057index 098de2b..fbb922c 100644
32058--- a/drivers/message/i2o/i2o_config.c
32059+++ b/drivers/message/i2o/i2o_config.c
32060@@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned long arg)
66a7e928
MT
32061 struct i2o_message *msg;
32062 unsigned int iop;
32063
32064+ pax_track_stack();
32065+
32066 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32067 return -EFAULT;
32068
fe2de317
MT
32069diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
32070index 07dbeaf..5533142 100644
32071--- a/drivers/message/i2o/i2o_proc.c
32072+++ b/drivers/message/i2o/i2o_proc.c
df50ba0c 32073@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
58c5fc13
MT
32074 "Array Controller Device"
32075 };
32076
32077-static char *chtostr(u8 * chars, int n)
32078-{
32079- char tmp[256];
32080- tmp[0] = 0;
32081- return strncat(tmp, (char *)chars, n);
32082-}
32083-
32084 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32085 char *group)
32086 {
fe2de317 32087@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
58c5fc13
MT
32088
32089 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32090 seq_printf(seq, "%-#8x", ddm_table.module_id);
32091- seq_printf(seq, "%-29s",
32092- chtostr(ddm_table.module_name_version, 28));
32093+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32094 seq_printf(seq, "%9d ", ddm_table.data_size);
32095 seq_printf(seq, "%8d", ddm_table.code_size);
32096
fe2de317 32097@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
58c5fc13
MT
32098
32099 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32100 seq_printf(seq, "%-#8x", dst->module_id);
32101- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32102- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32103+ seq_printf(seq, "%-.28s", dst->module_name_version);
32104+ seq_printf(seq, "%-.8s", dst->date);
32105 seq_printf(seq, "%8d ", dst->module_size);
32106 seq_printf(seq, "%8d ", dst->mpb_size);
32107 seq_printf(seq, "0x%04x", dst->module_flags);
fe2de317 32108@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
58c5fc13
MT
32109 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32110 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32111 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32112- seq_printf(seq, "Vendor info : %s\n",
32113- chtostr((u8 *) (work32 + 2), 16));
32114- seq_printf(seq, "Product info : %s\n",
32115- chtostr((u8 *) (work32 + 6), 16));
32116- seq_printf(seq, "Description : %s\n",
32117- chtostr((u8 *) (work32 + 10), 16));
32118- seq_printf(seq, "Product rev. : %s\n",
32119- chtostr((u8 *) (work32 + 14), 8));
32120+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32121+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32122+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32123+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32124
32125 seq_printf(seq, "Serial number : ");
32126 print_serial_number(seq, (u8 *) (work32 + 16),
fe2de317 32127@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
58c5fc13
MT
32128 }
32129
32130 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32131- seq_printf(seq, "Module name : %s\n",
32132- chtostr(result.module_name, 24));
32133- seq_printf(seq, "Module revision : %s\n",
32134- chtostr(result.module_rev, 8));
32135+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
32136+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32137
32138 seq_printf(seq, "Serial number : ");
32139 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
fe2de317 32140@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
58c5fc13
MT
32141 return 0;
32142 }
32143
32144- seq_printf(seq, "Device name : %s\n",
32145- chtostr(result.device_name, 64));
32146- seq_printf(seq, "Service name : %s\n",
32147- chtostr(result.service_name, 64));
32148- seq_printf(seq, "Physical name : %s\n",
32149- chtostr(result.physical_location, 64));
32150- seq_printf(seq, "Instance number : %s\n",
32151- chtostr(result.instance_number, 4));
32152+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
32153+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
32154+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32155+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32156
32157 return 0;
32158 }
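The removed chtostr() helper strncat()'ed each fixed-width, not necessarily NUL-terminated field into a 256-byte stack buffer and then returned a pointer to that buffer, which is invalid once the function returns; printing the fields directly with a precision such as %.28s bounds the read with no intermediate copy. A minimal demonstration of the replacement pattern (module_rev is a made-up example field):

#include <stdio.h>

int main(void)
{
    /* A fixed-width field as it might arrive from the controller: four
     * bytes with no terminating NUL. */
    char module_rev[4] = { '1', '.', '0', '7' };

    /* Replacement pattern from the patch: the printf precision bounds the
     * read, so no NUL terminator and no staging buffer are needed. */
    printf("Module revision : %.4s\n", module_rev);
    return 0;
}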
fe2de317
MT
32159diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32160index a8c08f3..155fe3d 100644
32161--- a/drivers/message/i2o/iop.c
32162+++ b/drivers/message/i2o/iop.c
32163@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
8308f9c9
MT
32164
32165 spin_lock_irqsave(&c->context_list_lock, flags);
32166
32167- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32168- atomic_inc(&c->context_list_counter);
32169+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32170+ atomic_inc_unchecked(&c->context_list_counter);
32171
32172- entry->context = atomic_read(&c->context_list_counter);
32173+ entry->context = atomic_read_unchecked(&c->context_list_counter);
32174
32175 list_add(&entry->list, &c->context_list);
32176
fe2de317 32177@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
8308f9c9
MT
32178
32179 #if BITS_PER_LONG == 64
32180 spin_lock_init(&c->context_list_lock);
32181- atomic_set(&c->context_list_counter, 0);
32182+ atomic_set_unchecked(&c->context_list_counter, 0);
32183 INIT_LIST_HEAD(&c->context_list);
32184 #endif
32185
fe2de317
MT
32186diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
32187index a20e1c4..4f57255 100644
32188--- a/drivers/mfd/ab3100-core.c
32189+++ b/drivers/mfd/ab3100-core.c
6e9df6a3
MT
32190@@ -809,7 +809,7 @@ struct ab_family_id {
32191 char *name;
32192 };
32193
32194-static const struct ab_family_id ids[] __devinitdata = {
32195+static const struct ab_family_id ids[] __devinitconst = {
32196 /* AB3100 */
32197 {
32198 .id = 0xc0,
fe2de317
MT
32199diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32200index f12720d..3c251fd 100644
32201--- a/drivers/mfd/abx500-core.c
32202+++ b/drivers/mfd/abx500-core.c
15a11c5b 32203@@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
66a7e928 32204
15a11c5b
MT
32205 struct abx500_device_entry {
32206 struct list_head list;
32207- struct abx500_ops ops;
32208+ abx500_ops_no_const ops;
66a7e928
MT
32209 struct device *dev;
32210 };
32211
fe2de317
MT
32212diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32213index 5c2a06a..8fa077c 100644
32214--- a/drivers/mfd/janz-cmodio.c
32215+++ b/drivers/mfd/janz-cmodio.c
57199397
MT
32216@@ -13,6 +13,7 @@
32217
32218 #include <linux/kernel.h>
32219 #include <linux/module.h>
32220+#include <linux/slab.h>
32221 #include <linux/init.h>
32222 #include <linux/pci.h>
32223 #include <linux/interrupt.h>
fe2de317
MT
32224diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
32225index 5fe5de1..af64f53 100644
32226--- a/drivers/mfd/wm8350-i2c.c
32227+++ b/drivers/mfd/wm8350-i2c.c
32228@@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
66a7e928
MT
32229 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32230 int ret;
32231
32232+ pax_track_stack();
32233+
32234 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32235 return -EINVAL;
32236
fe2de317
MT
32237diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32238index 8b51cd6..f628f8d 100644
32239--- a/drivers/misc/lis3lv02d/lis3lv02d.c
32240+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32241@@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
66a7e928
MT
32242 * the lid is closed. This leads to interrupts as soon as a little move
32243 * is done.
32244 */
32245- atomic_inc(&lis3_dev.count);
32246+ atomic_inc_unchecked(&lis3_dev.count);
32247
32248 wake_up_interruptible(&lis3_dev.misc_wait);
32249 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
fe2de317 32250@@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
66a7e928
MT
32251 if (lis3_dev.pm_dev)
32252 pm_runtime_get_sync(lis3_dev.pm_dev);
32253
32254- atomic_set(&lis3_dev.count, 0);
32255+ atomic_set_unchecked(&lis3_dev.count, 0);
32256 return 0;
32257 }
32258
fe2de317 32259@@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
66a7e928
MT
32260 add_wait_queue(&lis3_dev.misc_wait, &wait);
32261 while (true) {
32262 set_current_state(TASK_INTERRUPTIBLE);
32263- data = atomic_xchg(&lis3_dev.count, 0);
32264+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
32265 if (data)
32266 break;
32267
6e9df6a3 32268@@ -585,7 +585,7 @@ out:
66a7e928
MT
32269 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32270 {
32271 poll_wait(file, &lis3_dev.misc_wait, wait);
32272- if (atomic_read(&lis3_dev.count))
32273+ if (atomic_read_unchecked(&lis3_dev.count))
32274 return POLLIN | POLLRDNORM;
32275 return 0;
32276 }
fe2de317
MT
32277diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32278index a193958..4d7ecd2 100644
32279--- a/drivers/misc/lis3lv02d/lis3lv02d.h
32280+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
66a7e928
MT
32281@@ -265,7 +265,7 @@ struct lis3lv02d {
32282 struct input_polled_dev *idev; /* input device */
32283 struct platform_device *pdev; /* platform device */
32284 struct regulator_bulk_data regulators[2];
32285- atomic_t count; /* interrupt count after last read */
32286+ atomic_unchecked_t count; /* interrupt count after last read */
32287 union axis_conversion ac; /* hw -> logical axis */
32288 int mapped_btns[3];
32289
fe2de317
MT
32290diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32291index 2f30bad..c4c13d0 100644
32292--- a/drivers/misc/sgi-gru/gruhandles.c
32293+++ b/drivers/misc/sgi-gru/gruhandles.c
32294@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
ae4e228f
MT
32295 unsigned long nsec;
32296
32297 nsec = CLKS2NSEC(clks);
32298- atomic_long_inc(&mcs_op_statistics[op].count);
32299- atomic_long_add(nsec, &mcs_op_statistics[op].total);
32300+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32301+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32302 if (mcs_op_statistics[op].max < nsec)
32303 mcs_op_statistics[op].max = nsec;
32304 }
fe2de317
MT
32305diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32306index 7768b87..f8aac38 100644
32307--- a/drivers/misc/sgi-gru/gruprocfs.c
32308+++ b/drivers/misc/sgi-gru/gruprocfs.c
ae4e228f
MT
32309@@ -32,9 +32,9 @@
32310
32311 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32312
32313-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32314+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32315 {
32316- unsigned long val = atomic_long_read(v);
32317+ unsigned long val = atomic_long_read_unchecked(v);
32318
32319 seq_printf(s, "%16lu %s\n", val, id);
32320 }
fe2de317 32321@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
ae4e228f
MT
32322
32323 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32324 for (op = 0; op < mcsop_last; op++) {
32325- count = atomic_long_read(&mcs_op_statistics[op].count);
32326- total = atomic_long_read(&mcs_op_statistics[op].total);
32327+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32328+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32329 max = mcs_op_statistics[op].max;
32330 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32331 count ? total / count : 0, max);
fe2de317
MT
32332diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32333index 5c3ce24..4915ccb 100644
32334--- a/drivers/misc/sgi-gru/grutables.h
32335+++ b/drivers/misc/sgi-gru/grutables.h
ae4e228f
MT
32336@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32337 * GRU statistics.
32338 */
32339 struct gru_stats_s {
32340- atomic_long_t vdata_alloc;
32341- atomic_long_t vdata_free;
32342- atomic_long_t gts_alloc;
32343- atomic_long_t gts_free;
32344- atomic_long_t gms_alloc;
32345- atomic_long_t gms_free;
32346- atomic_long_t gts_double_allocate;
32347- atomic_long_t assign_context;
32348- atomic_long_t assign_context_failed;
32349- atomic_long_t free_context;
32350- atomic_long_t load_user_context;
32351- atomic_long_t load_kernel_context;
32352- atomic_long_t lock_kernel_context;
32353- atomic_long_t unlock_kernel_context;
32354- atomic_long_t steal_user_context;
32355- atomic_long_t steal_kernel_context;
32356- atomic_long_t steal_context_failed;
32357- atomic_long_t nopfn;
32358- atomic_long_t asid_new;
32359- atomic_long_t asid_next;
32360- atomic_long_t asid_wrap;
32361- atomic_long_t asid_reuse;
32362- atomic_long_t intr;
32363- atomic_long_t intr_cbr;
32364- atomic_long_t intr_tfh;
32365- atomic_long_t intr_spurious;
32366- atomic_long_t intr_mm_lock_failed;
32367- atomic_long_t call_os;
32368- atomic_long_t call_os_wait_queue;
32369- atomic_long_t user_flush_tlb;
32370- atomic_long_t user_unload_context;
32371- atomic_long_t user_exception;
32372- atomic_long_t set_context_option;
32373- atomic_long_t check_context_retarget_intr;
32374- atomic_long_t check_context_unload;
32375- atomic_long_t tlb_dropin;
32376- atomic_long_t tlb_preload_page;
32377- atomic_long_t tlb_dropin_fail_no_asid;
32378- atomic_long_t tlb_dropin_fail_upm;
32379- atomic_long_t tlb_dropin_fail_invalid;
32380- atomic_long_t tlb_dropin_fail_range_active;
32381- atomic_long_t tlb_dropin_fail_idle;
32382- atomic_long_t tlb_dropin_fail_fmm;
32383- atomic_long_t tlb_dropin_fail_no_exception;
32384- atomic_long_t tfh_stale_on_fault;
32385- atomic_long_t mmu_invalidate_range;
32386- atomic_long_t mmu_invalidate_page;
32387- atomic_long_t flush_tlb;
32388- atomic_long_t flush_tlb_gru;
32389- atomic_long_t flush_tlb_gru_tgh;
32390- atomic_long_t flush_tlb_gru_zero_asid;
ae4e228f
MT
32391+ atomic_long_unchecked_t vdata_alloc;
32392+ atomic_long_unchecked_t vdata_free;
32393+ atomic_long_unchecked_t gts_alloc;
32394+ atomic_long_unchecked_t gts_free;
32395+ atomic_long_unchecked_t gms_alloc;
32396+ atomic_long_unchecked_t gms_free;
32397+ atomic_long_unchecked_t gts_double_allocate;
32398+ atomic_long_unchecked_t assign_context;
32399+ atomic_long_unchecked_t assign_context_failed;
32400+ atomic_long_unchecked_t free_context;
32401+ atomic_long_unchecked_t load_user_context;
32402+ atomic_long_unchecked_t load_kernel_context;
32403+ atomic_long_unchecked_t lock_kernel_context;
32404+ atomic_long_unchecked_t unlock_kernel_context;
32405+ atomic_long_unchecked_t steal_user_context;
32406+ atomic_long_unchecked_t steal_kernel_context;
32407+ atomic_long_unchecked_t steal_context_failed;
32408+ atomic_long_unchecked_t nopfn;
32409+ atomic_long_unchecked_t asid_new;
32410+ atomic_long_unchecked_t asid_next;
32411+ atomic_long_unchecked_t asid_wrap;
32412+ atomic_long_unchecked_t asid_reuse;
32413+ atomic_long_unchecked_t intr;
32414+ atomic_long_unchecked_t intr_cbr;
32415+ atomic_long_unchecked_t intr_tfh;
32416+ atomic_long_unchecked_t intr_spurious;
32417+ atomic_long_unchecked_t intr_mm_lock_failed;
32418+ atomic_long_unchecked_t call_os;
32419+ atomic_long_unchecked_t call_os_wait_queue;
32420+ atomic_long_unchecked_t user_flush_tlb;
32421+ atomic_long_unchecked_t user_unload_context;
32422+ atomic_long_unchecked_t user_exception;
32423+ atomic_long_unchecked_t set_context_option;
32424+ atomic_long_unchecked_t check_context_retarget_intr;
32425+ atomic_long_unchecked_t check_context_unload;
32426+ atomic_long_unchecked_t tlb_dropin;
32427+ atomic_long_unchecked_t tlb_preload_page;
32428+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32429+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32430+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32431+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32432+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32433+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32434+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32435+ atomic_long_unchecked_t tfh_stale_on_fault;
32436+ atomic_long_unchecked_t mmu_invalidate_range;
32437+ atomic_long_unchecked_t mmu_invalidate_page;
32438+ atomic_long_unchecked_t flush_tlb;
32439+ atomic_long_unchecked_t flush_tlb_gru;
32440+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32441+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
fe2de317
MT
32442
32443- atomic_long_t copy_gpa;
32444- atomic_long_t read_gpa;
ae4e228f
MT
32445+ atomic_long_unchecked_t copy_gpa;
32446+ atomic_long_unchecked_t read_gpa;
fe2de317
MT
32447
32448- atomic_long_t mesq_receive;
32449- atomic_long_t mesq_receive_none;
32450- atomic_long_t mesq_send;
32451- atomic_long_t mesq_send_failed;
32452- atomic_long_t mesq_noop;
32453- atomic_long_t mesq_send_unexpected_error;
32454- atomic_long_t mesq_send_lb_overflow;
32455- atomic_long_t mesq_send_qlimit_reached;
32456- atomic_long_t mesq_send_amo_nacked;
32457- atomic_long_t mesq_send_put_nacked;
32458- atomic_long_t mesq_page_overflow;
32459- atomic_long_t mesq_qf_locked;
32460- atomic_long_t mesq_qf_noop_not_full;
32461- atomic_long_t mesq_qf_switch_head_failed;
32462- atomic_long_t mesq_qf_unexpected_error;
32463- atomic_long_t mesq_noop_unexpected_error;
32464- atomic_long_t mesq_noop_lb_overflow;
32465- atomic_long_t mesq_noop_qlimit_reached;
32466- atomic_long_t mesq_noop_amo_nacked;
32467- atomic_long_t mesq_noop_put_nacked;
32468- atomic_long_t mesq_noop_page_overflow;
ae4e228f
MT
32469+ atomic_long_unchecked_t mesq_receive;
32470+ atomic_long_unchecked_t mesq_receive_none;
32471+ atomic_long_unchecked_t mesq_send;
32472+ atomic_long_unchecked_t mesq_send_failed;
32473+ atomic_long_unchecked_t mesq_noop;
32474+ atomic_long_unchecked_t mesq_send_unexpected_error;
32475+ atomic_long_unchecked_t mesq_send_lb_overflow;
32476+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32477+ atomic_long_unchecked_t mesq_send_amo_nacked;
32478+ atomic_long_unchecked_t mesq_send_put_nacked;
32479+ atomic_long_unchecked_t mesq_page_overflow;
32480+ atomic_long_unchecked_t mesq_qf_locked;
32481+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32482+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32483+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32484+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32485+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32486+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32487+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32488+ atomic_long_unchecked_t mesq_noop_put_nacked;
32489+ atomic_long_unchecked_t mesq_noop_page_overflow;
58c5fc13 32490
58c5fc13 32491 };
58c5fc13 32492
fe2de317 32493@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
ae4e228f 32494 tghop_invalidate, mcsop_last};
58c5fc13 32495
ae4e228f
MT
32496 struct mcs_op_statistic {
32497- atomic_long_t count;
32498- atomic_long_t total;
32499+ atomic_long_unchecked_t count;
32500+ atomic_long_unchecked_t total;
32501 unsigned long max;
58c5fc13
MT
32502 };
32503
fe2de317 32504@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
58c5fc13 32505
ae4e228f
MT
32506 #define STAT(id) do { \
32507 if (gru_options & OPT_STATS) \
32508- atomic_long_inc(&gru_stats.id); \
32509+ atomic_long_inc_unchecked(&gru_stats.id); \
32510 } while (0)
58c5fc13 32511
ae4e228f 32512 #ifdef CONFIG_SGI_GRU_DEBUG
fe2de317
MT
32513diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32514index 851b2f2..a4ec097 100644
32515--- a/drivers/misc/sgi-xp/xp.h
32516+++ b/drivers/misc/sgi-xp/xp.h
32517@@ -289,7 +289,7 @@ struct xpc_interface {
32518 xpc_notify_func, void *);
32519 void (*received) (short, int, void *);
32520 enum xp_retval (*partid_to_nasids) (short, void *);
32521-};
32522+} __no_const;
32523
32524 extern struct xpc_interface xpc_interface;
32525
32526diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32527index b94d5f7..7f494c5 100644
32528--- a/drivers/misc/sgi-xp/xpc.h
32529+++ b/drivers/misc/sgi-xp/xpc.h
6e9df6a3
MT
32530@@ -835,6 +835,7 @@ struct xpc_arch_operations {
32531 void (*received_payload) (struct xpc_channel *, void *);
32532 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32533 };
32534+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32535
32536 /* struct xpc_partition act_state values (for XPC HB) */
32537
fe2de317 32538@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
6e9df6a3
MT
32539 /* found in xpc_main.c */
32540 extern struct device *xpc_part;
32541 extern struct device *xpc_chan;
32542-extern struct xpc_arch_operations xpc_arch_ops;
32543+extern xpc_arch_operations_no_const xpc_arch_ops;
32544 extern int xpc_disengage_timelimit;
32545 extern int xpc_disengage_timedout;
32546 extern int xpc_activate_IRQ_rcvd;
fe2de317
MT
32547diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32548index 8d082b4..aa749ae 100644
32549--- a/drivers/misc/sgi-xp/xpc_main.c
32550+++ b/drivers/misc/sgi-xp/xpc_main.c
32551@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
6e9df6a3
MT
32552 .notifier_call = xpc_system_die,
32553 };
32554
32555-struct xpc_arch_operations xpc_arch_ops;
32556+xpc_arch_operations_no_const xpc_arch_ops;
32557
32558 /*
32559 * Timer function to enforce the timelimit on the partition disengage.
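
The sgi-xp hunks above mark the function-pointer-only xpc_interface with __no_const and introduce an xpc_arch_operations_no_const typedef for the one instance (xpc_arch_ops) that is filled in at runtime. As I understand the PaX constify plugin, structures consisting purely of function pointers are normally made const automatically; __no_const opts out a type, and the *_no_const typedefs opt out individual instances that legitimately have to stay writable. The same typedef pattern recurs in the network-driver headers later in the patch (e1000e, igb, igbvf, ixgbe, ixgbevf). A rough plain-C sketch of the underlying idea, with invented names and no plugin involved:

struct ops {
        int (*start)(void);
        void (*stop)(void);
};

static int  my_start(void) { return 0; }
static void my_stop(void)  { }

/* Constifiable case: set once, at build time, can live in read-only memory. */
static const struct ops fixed_ops = {
        .start = my_start,
        .stop  = my_stop,
};

/* The __no_const case: filled in at runtime, so it must remain writable. */
static struct ops runtime_ops;
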
fe2de317
MT
32560diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32561index 26c5286..292d261 100644
32562--- a/drivers/mmc/host/sdhci-pci.c
32563+++ b/drivers/mmc/host/sdhci-pci.c
32564@@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
6e9df6a3
MT
32565 .probe = via_probe,
32566 };
32567
32568-static const struct pci_device_id pci_ids[] __devinitdata = {
32569+static const struct pci_device_id pci_ids[] __devinitconst = {
32570 {
32571 .vendor = PCI_VENDOR_ID_RICOH,
32572 .device = PCI_DEVICE_ID_RICOH_R5C822,
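
__devinitdata places the PCI ID table in a writable device-init data section; since the table is never written, the hunk above (like the matching atl2.c and fealnx.c hunks further down) switches it to __devinitconst so it can be placed read-only. A plain-C sketch of the same idea without the section-placement macro; identifiers and values here are illustrative only:

/* Read-only device table: declaring it const allows read-only placement. */
struct pci_id_sketch {
        unsigned short vendor;
        unsigned short device;
};

static const struct pci_id_sketch ids_sketch[] = {
        { 0x1180, 0x0822 },     /* illustrative values, not the real IDs */
        { 0, 0 }
};
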
fe2de317
MT
32573diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
32574index e1e122f..d99a6ea 100644
32575--- a/drivers/mtd/chips/cfi_cmdset_0001.c
32576+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
32577@@ -757,6 +757,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
66a7e928
MT
32578 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
32579 unsigned long timeo = jiffies + HZ;
32580
32581+ pax_track_stack();
32582+
32583 /* Prevent setting state FL_SYNCING for chip in suspended state. */
32584 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
32585 goto sleep;
fe2de317 32586@@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
66a7e928
MT
32587 unsigned long initial_adr;
32588 int initial_len = len;
32589
32590+ pax_track_stack();
32591+
32592 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
32593 adr += chip->start;
32594 initial_adr = adr;
fe2de317 32595@@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
66a7e928
MT
32596 int retries = 3;
32597 int ret;
32598
32599+ pax_track_stack();
32600+
32601 adr += chip->start;
32602
32603 retry:
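
cfi_cmdset_0001.c (and many of the drivers below) gain a pax_track_stack() call at the top of functions that keep large buffers or descriptor arrays on the stack. pax_track_stack() is a PaX facility; my understanding is that it lets the kernel account for and sanitize unusually deep stack usage in exactly these locally-heavy functions. The sketch below only illustrates the kind of function being instrumented, i.e. one with a large stack frame; the buffer size and names are made up:

#include <string.h>

#define SCRATCH_SZ 4096

static int parse_block(const unsigned char *src)
{
        unsigned char scratch[SCRATCH_SZ];      /* large on-stack buffer: the trait
                                                   the patch keys on */
        memcpy(scratch, src, SCRATCH_SZ);
        return scratch[0];
}
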
fe2de317
MT
32604diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
32605index 179814a..abe9d60 100644
32606--- a/drivers/mtd/chips/cfi_cmdset_0020.c
32607+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
32608@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
66a7e928
MT
32609 unsigned long cmd_addr;
32610 struct cfi_private *cfi = map->fldrv_priv;
32611
32612+ pax_track_stack();
32613+
32614 adr += chip->start;
32615
32616 /* Ensure cmd read/writes are aligned. */
fe2de317 32617@@ -429,6 +431,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
66a7e928
MT
32618 DECLARE_WAITQUEUE(wait, current);
32619 int wbufsize, z;
32620
32621+ pax_track_stack();
32622+
32623 /* M58LW064A requires bus alignment for buffer wriets -- saw */
32624 if (adr & (map_bankwidth(map)-1))
32625 return -EINVAL;
fe2de317 32626@@ -743,6 +747,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
66a7e928
MT
32627 DECLARE_WAITQUEUE(wait, current);
32628 int ret = 0;
32629
32630+ pax_track_stack();
32631+
32632 adr += chip->start;
32633
32634 /* Let's determine this according to the interleave only once */
fe2de317 32635@@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
66a7e928
MT
32636 unsigned long timeo = jiffies + HZ;
32637 DECLARE_WAITQUEUE(wait, current);
32638
32639+ pax_track_stack();
32640+
32641 adr += chip->start;
32642
32643 /* Let's determine this according to the interleave only once */
fe2de317 32644@@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
66a7e928
MT
32645 unsigned long timeo = jiffies + HZ;
32646 DECLARE_WAITQUEUE(wait, current);
32647
32648+ pax_track_stack();
32649+
32650 adr += chip->start;
32651
32652 /* Let's determine this according to the interleave only once */
fe2de317
MT
32653diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32654index f7fbf60..9866457 100644
32655--- a/drivers/mtd/devices/doc2000.c
32656+++ b/drivers/mtd/devices/doc2000.c
32657@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
58c5fc13
MT
32658
32659 /* The ECC will not be calculated correctly if less than 512 is written */
32660 /* DBB-
32661- if (len != 0x200 && eccbuf)
32662+ if (len != 0x200)
32663 printk(KERN_WARNING
32664 "ECC needs a full sector write (adr: %lx size %lx)\n",
32665 (long) to, (long) len);
fe2de317
MT
32666diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32667index 241192f..d0c35a3 100644
32668--- a/drivers/mtd/devices/doc2001.c
32669+++ b/drivers/mtd/devices/doc2001.c
32670@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
ae4e228f
MT
32671 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32672
58c5fc13 32673 /* Don't allow read past end of device */
ae4e228f
MT
32674- if (from >= this->totlen)
32675+ if (from >= this->totlen || !len)
58c5fc13 32676 return -EINVAL;
58c5fc13
MT
32677
32678 /* Don't allow a single read to cross a 512-byte block boundary */
fe2de317
MT
32679diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
32680index 037b399..225a71d 100644
32681--- a/drivers/mtd/ftl.c
32682+++ b/drivers/mtd/ftl.c
32683@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
66a7e928
MT
32684 loff_t offset;
32685 uint16_t srcunitswap = cpu_to_le16(srcunit);
32686
32687+ pax_track_stack();
32688+
32689 eun = &part->EUNInfo[srcunit];
32690 xfer = &part->XferInfo[xferunit];
32691 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
fe2de317
MT
32692diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
32693index d7592e6..31c505c 100644
32694--- a/drivers/mtd/inftlcore.c
32695+++ b/drivers/mtd/inftlcore.c
32696@@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
66a7e928
MT
32697 struct inftl_oob oob;
32698 size_t retlen;
32699
32700+ pax_track_stack();
32701+
32702 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
32703 "pending=%d)\n", inftl, thisVUC, pendingblock);
32704
fe2de317
MT
32705diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
32706index 104052e..6232be5 100644
32707--- a/drivers/mtd/inftlmount.c
32708+++ b/drivers/mtd/inftlmount.c
32709@@ -53,6 +53,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
66a7e928
MT
32710 struct INFTLPartition *ip;
32711 size_t retlen;
32712
32713+ pax_track_stack();
32714+
32715 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
32716
32717 /*
fe2de317
MT
32718diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
32719index dbfe17b..c7b0918 100644
32720--- a/drivers/mtd/lpddr/qinfo_probe.c
32721+++ b/drivers/mtd/lpddr/qinfo_probe.c
32722@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
66a7e928
MT
32723 {
32724 map_word pfow_val[4];
32725
32726+ pax_track_stack();
32727+
32728 /* Check identification string */
32729 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
32730 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
fe2de317
MT
32731diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
32732index 49e20a4..60fbfa5 100644
32733--- a/drivers/mtd/mtdchar.c
32734+++ b/drivers/mtd/mtdchar.c
32735@@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
66a7e928
MT
32736 u_long size;
32737 struct mtd_info_user info;
32738
32739+ pax_track_stack();
32740+
32741 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
32742
32743 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
fe2de317
MT
32744diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32745index d527621..2491fab 100644
32746--- a/drivers/mtd/nand/denali.c
32747+++ b/drivers/mtd/nand/denali.c
15a11c5b 32748@@ -26,6 +26,7 @@
57199397
MT
32749 #include <linux/pci.h>
32750 #include <linux/mtd/mtd.h>
32751 #include <linux/module.h>
32752+#include <linux/slab.h>
32753
32754 #include "denali.h"
32755
fe2de317
MT
32756diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
32757index b155666..611b801 100644
32758--- a/drivers/mtd/nftlcore.c
32759+++ b/drivers/mtd/nftlcore.c
32760@@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
66a7e928
MT
32761 int inplace = 1;
32762 size_t retlen;
32763
32764+ pax_track_stack();
32765+
32766 memset(BlockMap, 0xff, sizeof(BlockMap));
32767 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
32768
fe2de317
MT
32769diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32770index e3cd1ff..0ea79a3 100644
32771--- a/drivers/mtd/nftlmount.c
32772+++ b/drivers/mtd/nftlmount.c
66a7e928
MT
32773@@ -24,6 +24,7 @@
32774 #include <asm/errno.h>
32775 #include <linux/delay.h>
32776 #include <linux/slab.h>
32777+#include <linux/sched.h>
32778 #include <linux/mtd/mtd.h>
32779 #include <linux/mtd/nand.h>
32780 #include <linux/mtd/nftl.h>
fe2de317 32781@@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
66a7e928
MT
32782 struct mtd_info *mtd = nftl->mbd.mtd;
32783 unsigned int i;
32784
32785+ pax_track_stack();
32786+
32787 /* Assume logical EraseSize == physical erasesize for starting the scan.
32788 We'll sort it out later if we find a MediaHeader which says otherwise */
32789 /* Actually, we won't. The new DiskOnChip driver has already scanned
fe2de317
MT
32790diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32791index 6c3fb5a..c542a81 100644
32792--- a/drivers/mtd/ubi/build.c
32793+++ b/drivers/mtd/ubi/build.c
6e9df6a3 32794@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
ae4e228f
MT
32795 static int __init bytes_str_to_int(const char *str)
32796 {
32797 char *endp;
32798- unsigned long result;
32799+ unsigned long result, scale = 1;
58c5fc13
MT
32800
32801 result = simple_strtoul(str, &endp, 0);
ae4e228f 32802 if (str == endp || result >= INT_MAX) {
fe2de317 32803@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
ae4e228f
MT
32804
32805 switch (*endp) {
32806 case 'G':
32807- result *= 1024;
32808+ scale *= 1024;
32809 case 'M':
32810- result *= 1024;
32811+ scale *= 1024;
32812 case 'K':
32813- result *= 1024;
32814+ scale *= 1024;
32815 if (endp[1] == 'i' && endp[2] == 'B')
32816 endp += 2;
32817 case '\0':
fe2de317 32818@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
58c5fc13 32819 return -EINVAL;
ae4e228f
MT
32820 }
32821
32822- return result;
32823+ if ((intoverflow_t)result*scale >= INT_MAX) {
32824+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32825+ str);
32826+ return -EINVAL;
32827+ }
32828+
32829+ return result*scale;
32830 }
32831
32832 /**
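
The UBI hunk above reworks bytes_str_to_int() so that the K/M/G suffix is accumulated in a separate scale instead of being multiplied into result directly, and the product is range-checked before being returned (intoverflow_t appears to be a wider PaX-provided type used for exactly that check). A compact userspace rendering of the same logic, assuming strtoul and a simplified suffix grammar; the function name is reused only for readability:

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

/* Parse "<number>[K|M|G]" into an int; the suffix becomes a separate scale
 * and the product is checked in a wider type before it is returned. */
static int bytes_str_to_int(const char *str)
{
        char *endp;
        unsigned long result = strtoul(str, &endp, 0);
        unsigned long scale = 1;

        if (endp == str || result >= INT_MAX)
                return -EINVAL;

        switch (*endp) {
        case 'G':
                scale *= 1024;
                /* fall through */
        case 'M':
                scale *= 1024;
                /* fall through */
        case 'K':
                scale *= 1024;
                break;
        case '\0':
                break;
        default:
                return -EINVAL;
        }

        if ((unsigned long long)result * scale >= INT_MAX)
                return -EINVAL;         /* overflow caught before the narrowing return */

        return (int)(result * scale);
}
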
fe2de317
MT
32833diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
32834index d4f7dda..d627d46 100644
32835--- a/drivers/net/atlx/atl2.c
32836+++ b/drivers/net/atlx/atl2.c
32837@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
6e9df6a3
MT
32838 */
32839
32840 #define ATL2_PARAM(X, desc) \
32841- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32842+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32843 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32844 MODULE_PARM_DESC(X, desc);
32845 #else
fe2de317
MT
32846diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
32847index 87aecdf..ec23470 100644
32848--- a/drivers/net/bna/bfa_ioc_ct.c
32849+++ b/drivers/net/bna/bfa_ioc_ct.c
32850@@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
15a11c5b
MT
32851 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
32852 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
32853
32854-static struct bfa_ioc_hwif nw_hwif_ct;
32855+static struct bfa_ioc_hwif nw_hwif_ct = {
32856+ .ioc_pll_init = bfa_ioc_ct_pll_init,
32857+ .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
32858+ .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
32859+ .ioc_reg_init = bfa_ioc_ct_reg_init,
32860+ .ioc_map_port = bfa_ioc_ct_map_port,
32861+ .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
32862+ .ioc_notify_fail = bfa_ioc_ct_notify_fail,
32863+ .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
32864+ .ioc_sync_start = bfa_ioc_ct_sync_start,
32865+ .ioc_sync_join = bfa_ioc_ct_sync_join,
32866+ .ioc_sync_leave = bfa_ioc_ct_sync_leave,
32867+ .ioc_sync_ack = bfa_ioc_ct_sync_ack,
32868+ .ioc_sync_complete = bfa_ioc_ct_sync_complete
32869+};
66a7e928 32870
15a11c5b
MT
32871 /**
32872 * Called from bfa_ioc_attach() to map asic specific calls.
32873@@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
32874 void
32875 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
32876 {
32877- nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
32878- nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
32879- nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
32880- nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
32881- nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
32882- nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
32883- nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
32884- nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
32885- nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
32886- nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
32887- nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
32888- nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
32889- nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
32890-
32891 ioc->ioc_hwif = &nw_hwif_ct;
32892 }
32893
fe2de317
MT
32894diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
32895index 8e35b25..c39f205 100644
32896--- a/drivers/net/bna/bnad.c
32897+++ b/drivers/net/bna/bnad.c
32898@@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
15a11c5b
MT
32899 struct bna_intr_info *intr_info =
32900 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
32901 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
32902- struct bna_tx_event_cbfn tx_cbfn;
32903+ static struct bna_tx_event_cbfn tx_cbfn = {
32904+ /* Initialize the tx event handlers */
32905+ .tcb_setup_cbfn = bnad_cb_tcb_setup,
32906+ .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
32907+ .tx_stall_cbfn = bnad_cb_tx_stall,
32908+ .tx_resume_cbfn = bnad_cb_tx_resume,
32909+ .tx_cleanup_cbfn = bnad_cb_tx_cleanup
32910+ };
32911 struct bna_tx *tx;
32912 unsigned long flags;
32913
fe2de317 32914@@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
15a11c5b
MT
32915 tx_config->txq_depth = bnad->txq_depth;
32916 tx_config->tx_type = BNA_TX_T_REGULAR;
32917
32918- /* Initialize the tx event handlers */
32919- tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
32920- tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
32921- tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
32922- tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
32923- tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
32924-
32925 /* Get BNA's resource requirement for one tx object */
32926 spin_lock_irqsave(&bnad->bna_lock, flags);
32927 bna_tx_res_req(bnad->num_txq_per_tx,
fe2de317 32928@@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
15a11c5b
MT
32929 struct bna_intr_info *intr_info =
32930 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
32931 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
32932- struct bna_rx_event_cbfn rx_cbfn;
32933+ static struct bna_rx_event_cbfn rx_cbfn = {
32934+ /* Initialize the Rx event handlers */
32935+ .rcb_setup_cbfn = bnad_cb_rcb_setup,
32936+ .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
32937+ .ccb_setup_cbfn = bnad_cb_ccb_setup,
32938+ .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
32939+ .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
32940+ .rx_post_cbfn = bnad_cb_rx_post
32941+ };
32942 struct bna_rx *rx;
32943 unsigned long flags;
32944
32945 /* Initialize the Rx object configuration */
32946 bnad_init_rx_config(bnad, rx_config);
66a7e928 32947
15a11c5b
MT
32948- /* Initialize the Rx event handlers */
32949- rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
32950- rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
32951- rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
32952- rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
32953- rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
32954- rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
32955-
32956 /* Get BNA's resource requirement for one Rx object */
32957 spin_lock_irqsave(&bnad->bna_lock, flags);
32958 bna_rx_res_req(rx_config, res_info);
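
Both bna hunks above replace field-by-field runtime assignment of a callback table (nw_hwif_ct, tx_cbfn, rx_cbfn) with a static designated initializer, so the table is fully populated at compile time and no longer needs to be written at run time. A generic before/after sketch with invented names:

struct hwif_ops {
        int (*init)(void);
        void (*reset)(void);
};

static int  ct_init(void)  { return 0; }
static void ct_reset(void) { }

/* After: populated once, at build time. */
static struct hwif_ops nw_hwif = {
        .init  = ct_init,
        .reset = ct_reset,
};

/* Before (the pattern the patch removes): assignment at attach time,
 *      nw_hwif.init  = ct_init;
 *      nw_hwif.reset = ct_reset;
 */
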
fe2de317
MT
32959diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
32960index 4b2b570..31033f4 100644
32961--- a/drivers/net/bnx2.c
32962+++ b/drivers/net/bnx2.c
6e9df6a3 32963@@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
66a7e928
MT
32964 int rc = 0;
32965 u32 magic, csum;
32966
32967+ pax_track_stack();
32968+
32969 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
32970 goto test_nvram_done;
32971
fe2de317
MT
32972diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
32973index cf3e479..5dc0ecc 100644
32974--- a/drivers/net/bnx2x/bnx2x_ethtool.c
32975+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
32976@@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
66a7e928
MT
32977 int i, rc;
32978 u32 magic, crc;
32979
32980+ pax_track_stack();
32981+
32982 if (BP_NOMCP(bp))
32983 return 0;
32984
fe2de317
MT
32985diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
32986index 9a517c2..a50cfcb 100644
32987--- a/drivers/net/bnx2x/bnx2x_sp.h
32988+++ b/drivers/net/bnx2x/bnx2x_sp.h
6e9df6a3
MT
32989@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32990
32991 int (*wait_comp)(struct bnx2x *bp,
32992 struct bnx2x_rx_mode_ramrod_params *p);
32993-};
32994+} __no_const;
32995
32996 /********************** Set multicast group ***********************************/
32997
fe2de317
MT
32998diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
32999index c5f5479..2e8c260 100644
33000--- a/drivers/net/cxgb3/l2t.h
33001+++ b/drivers/net/cxgb3/l2t.h
33002@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
15a11c5b
MT
33003 */
33004 struct l2t_skb_cb {
33005 arp_failure_handler_func arp_failure_handler;
33006-};
33007+} __no_const;
33008
33009 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33010
fe2de317
MT
33011diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
33012index b4efa29..c5f2703 100644
33013--- a/drivers/net/cxgb4/cxgb4_main.c
33014+++ b/drivers/net/cxgb4/cxgb4_main.c
33015@@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct adapter *adap)
66a7e928
MT
33016 unsigned int nchan = adap->params.nports;
33017 struct msix_entry entries[MAX_INGQ + 1];
33018
33019+ pax_track_stack();
33020+
33021 for (i = 0; i < ARRAY_SIZE(entries); ++i)
33022 entries[i].entry = i;
33023
fe2de317
MT
33024diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
33025index d1ec111..12735bc 100644
33026--- a/drivers/net/cxgb4/t4_hw.c
33027+++ b/drivers/net/cxgb4/t4_hw.c
33028@@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
66a7e928
MT
33029 u8 vpd[VPD_LEN], csum;
33030 unsigned int vpdr_len, kw_offset, id_len;
33031
33032+ pax_track_stack();
33033+
33034 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
33035 if (ret < 0)
33036 return ret;
fe2de317
MT
33037diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
33038index 536b3a5..e6f8dcc 100644
33039--- a/drivers/net/e1000e/82571.c
33040+++ b/drivers/net/e1000e/82571.c
33041@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
ae4e228f
MT
33042 {
33043 struct e1000_hw *hw = &adapter->hw;
33044 struct e1000_mac_info *mac = &hw->mac;
16454cff 33045- struct e1000_mac_operations *func = &mac->ops;
15a11c5b 33046+ e1000_mac_operations_no_const *func = &mac->ops;
ae4e228f
MT
33047 u32 swsm = 0;
33048 u32 swsm2 = 0;
16454cff 33049 bool force_clear_smbi = false;
fe2de317
MT
33050diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
33051index e4f4225..24da2ea 100644
33052--- a/drivers/net/e1000e/es2lan.c
33053+++ b/drivers/net/e1000e/es2lan.c
33054@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
ae4e228f
MT
33055 {
33056 struct e1000_hw *hw = &adapter->hw;
33057 struct e1000_mac_info *mac = &hw->mac;
16454cff 33058- struct e1000_mac_operations *func = &mac->ops;
15a11c5b 33059+ e1000_mac_operations_no_const *func = &mac->ops;
ae4e228f
MT
33060
33061 /* Set media type */
16454cff 33062 switch (adapter->pdev->device) {
fe2de317
MT
33063diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
33064index 2967039..ca8c40c 100644
33065--- a/drivers/net/e1000e/hw.h
33066+++ b/drivers/net/e1000e/hw.h
6e9df6a3 33067@@ -778,6 +778,7 @@ struct e1000_mac_operations {
15a11c5b
MT
33068 void (*write_vfta)(struct e1000_hw *, u32, u32);
33069 s32 (*read_mac_addr)(struct e1000_hw *);
ae4e228f 33070 };
15a11c5b 33071+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
ae4e228f 33072
6e9df6a3
MT
33073 /*
33074 * When to use various PHY register access functions:
33075@@ -818,6 +819,7 @@ struct e1000_phy_operations {
15a11c5b
MT
33076 void (*power_up)(struct e1000_hw *);
33077 void (*power_down)(struct e1000_hw *);
ae4e228f 33078 };
15a11c5b 33079+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
ae4e228f 33080
15a11c5b
MT
33081 /* Function pointers for the NVM. */
33082 struct e1000_nvm_operations {
6e9df6a3 33083@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
15a11c5b
MT
33084 s32 (*validate)(struct e1000_hw *);
33085 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
ae4e228f 33086 };
15a11c5b 33087+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
ae4e228f
MT
33088
33089 struct e1000_mac_info {
15a11c5b
MT
33090- struct e1000_mac_operations ops;
33091+ e1000_mac_operations_no_const ops;
66a7e928
MT
33092 u8 addr[ETH_ALEN];
33093 u8 perm_addr[ETH_ALEN];
15a11c5b 33094
6e9df6a3 33095@@ -872,7 +875,7 @@ struct e1000_mac_info {
bc901d79
MT
33096 };
33097
33098 struct e1000_phy_info {
15a11c5b
MT
33099- struct e1000_phy_operations ops;
33100+ e1000_phy_operations_no_const ops;
bc901d79
MT
33101
33102 enum e1000_phy_type type;
15a11c5b 33103
6e9df6a3 33104@@ -906,7 +909,7 @@ struct e1000_phy_info {
ae4e228f
MT
33105 };
33106
33107 struct e1000_nvm_info {
15a11c5b
MT
33108- struct e1000_nvm_operations ops;
33109+ e1000_nvm_operations_no_const ops;
ae4e228f
MT
33110
33111 enum e1000_nvm_type type;
15a11c5b 33112 enum e1000_nvm_override override;
fe2de317
MT
33113diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
33114index fa8677c..196356f 100644
33115--- a/drivers/net/fealnx.c
33116+++ b/drivers/net/fealnx.c
6e9df6a3
MT
33117@@ -150,7 +150,7 @@ struct chip_info {
33118 int flags;
33119 };
33120
33121-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
33122+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
33123 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33124 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
33125 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
fe2de317
MT
33126diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
33127index 2a5a34d..be871cc 100644
33128--- a/drivers/net/hamradio/6pack.c
33129+++ b/drivers/net/hamradio/6pack.c
33130@@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
66a7e928
MT
33131 unsigned char buf[512];
33132 int count1;
33133
33134+ pax_track_stack();
33135+
33136 if (!count)
33137 return;
33138
fe2de317
MT
33139diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
33140index 4519a13..f97fcd0 100644
33141--- a/drivers/net/igb/e1000_hw.h
33142+++ b/drivers/net/igb/e1000_hw.h
15a11c5b
MT
33143@@ -314,6 +314,7 @@ struct e1000_mac_operations {
33144 s32 (*read_mac_addr)(struct e1000_hw *);
33145 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
66a7e928 33146 };
15a11c5b 33147+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
66a7e928 33148
15a11c5b
MT
33149 struct e1000_phy_operations {
33150 s32 (*acquire)(struct e1000_hw *);
33151@@ -330,6 +331,7 @@ struct e1000_phy_operations {
33152 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33153 s32 (*write_reg)(struct e1000_hw *, u32, u16);
66a7e928 33154 };
15a11c5b 33155+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
66a7e928 33156
15a11c5b
MT
33157 struct e1000_nvm_operations {
33158 s32 (*acquire)(struct e1000_hw *);
33159@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
33160 s32 (*update)(struct e1000_hw *);
33161 s32 (*validate)(struct e1000_hw *);
66a7e928 33162 };
15a11c5b 33163+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
ae4e228f
MT
33164
33165 struct e1000_info {
33166 s32 (*get_invariants)(struct e1000_hw *);
15a11c5b 33167@@ -350,7 +353,7 @@ struct e1000_info {
ae4e228f 33168 extern const struct e1000_info e1000_82575_info;
bc901d79
MT
33169
33170 struct e1000_mac_info {
15a11c5b
MT
33171- struct e1000_mac_operations ops;
33172+ e1000_mac_operations_no_const ops;
bc901d79
MT
33173
33174 u8 addr[6];
15a11c5b
MT
33175 u8 perm_addr[6];
33176@@ -388,7 +391,7 @@ struct e1000_mac_info {
bc901d79
MT
33177 };
33178
33179 struct e1000_phy_info {
15a11c5b
MT
33180- struct e1000_phy_operations ops;
33181+ e1000_phy_operations_no_const ops;
bc901d79
MT
33182
33183 enum e1000_phy_type type;
15a11c5b
MT
33184
33185@@ -423,7 +426,7 @@ struct e1000_phy_info {
ae4e228f
MT
33186 };
33187
33188 struct e1000_nvm_info {
15a11c5b
MT
33189- struct e1000_nvm_operations ops;
33190+ e1000_nvm_operations_no_const ops;
ae4e228f 33191 enum e1000_nvm_type type;
66a7e928 33192 enum e1000_nvm_override override;
15a11c5b
MT
33193
33194@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
33195 s32 (*check_for_ack)(struct e1000_hw *, u16);
33196 s32 (*check_for_rst)(struct e1000_hw *, u16);
33197 };
33198+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33199
33200 struct e1000_mbx_stats {
33201 u32 msgs_tx;
33202@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
bc901d79
MT
33203 };
33204
15a11c5b
MT
33205 struct e1000_mbx_info {
33206- struct e1000_mbx_operations ops;
33207+ e1000_mbx_operations_no_const ops;
33208 struct e1000_mbx_stats stats;
33209 u32 timeout;
33210 u32 usec_delay;
fe2de317
MT
33211diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
33212index d7ed58f..64cde36 100644
33213--- a/drivers/net/igbvf/vf.h
33214+++ b/drivers/net/igbvf/vf.h
15a11c5b
MT
33215@@ -189,9 +189,10 @@ struct e1000_mac_operations {
33216 s32 (*read_mac_addr)(struct e1000_hw *);
33217 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33218 };
33219+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33220
bc901d79 33221 struct e1000_mac_info {
15a11c5b
MT
33222- struct e1000_mac_operations ops;
33223+ e1000_mac_operations_no_const ops;
bc901d79
MT
33224 u8 addr[6];
33225 u8 perm_addr[6];
66a7e928 33226
15a11c5b
MT
33227@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
33228 s32 (*check_for_ack)(struct e1000_hw *);
33229 s32 (*check_for_rst)(struct e1000_hw *);
33230 };
33231+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
66a7e928 33232
15a11c5b
MT
33233 struct e1000_mbx_stats {
33234 u32 msgs_tx;
33235@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
33236 };
66a7e928 33237
15a11c5b
MT
33238 struct e1000_mbx_info {
33239- struct e1000_mbx_operations ops;
33240+ e1000_mbx_operations_no_const ops;
33241 struct e1000_mbx_stats stats;
33242 u32 timeout;
33243 u32 usec_delay;
fe2de317
MT
33244diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
33245index 6a130eb..1aeb9e4 100644
33246--- a/drivers/net/ixgb/ixgb_main.c
33247+++ b/drivers/net/ixgb/ixgb_main.c
33248@@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev)
66a7e928
MT
33249 u32 rctl;
33250 int i;
33251
33252+ pax_track_stack();
33253+
33254 /* Check for Promiscuous and All Multicast modes */
33255
33256 rctl = IXGB_READ_REG(hw, RCTL);
fe2de317
MT
33257diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
33258index dd7fbeb..44b9bbf 100644
33259--- a/drivers/net/ixgb/ixgb_param.c
33260+++ b/drivers/net/ixgb/ixgb_param.c
66a7e928
MT
33261@@ -261,6 +261,9 @@ void __devinit
33262 ixgb_check_options(struct ixgb_adapter *adapter)
33263 {
33264 int bd = adapter->bd_number;
33265+
33266+ pax_track_stack();
33267+
33268 if (bd >= IXGB_MAX_NIC) {
33269 pr_notice("Warning: no configuration for board #%i\n", bd);
33270 pr_notice("Using defaults for all values\n");
fe2de317
MT
33271diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
33272index e0d970e..1cfdea5 100644
33273--- a/drivers/net/ixgbe/ixgbe_type.h
33274+++ b/drivers/net/ixgbe/ixgbe_type.h
6e9df6a3 33275@@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
15a11c5b
MT
33276 s32 (*update_checksum)(struct ixgbe_hw *);
33277 u16 (*calc_checksum)(struct ixgbe_hw *);
66a7e928 33278 };
15a11c5b 33279+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
66a7e928 33280
15a11c5b
MT
33281 struct ixgbe_mac_operations {
33282 s32 (*init_hw)(struct ixgbe_hw *);
6e9df6a3
MT
33283@@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
33284 /* Manageability interface */
33285 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
66a7e928 33286 };
15a11c5b 33287+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
66a7e928 33288
15a11c5b
MT
33289 struct ixgbe_phy_operations {
33290 s32 (*identify)(struct ixgbe_hw *);
6e9df6a3 33291@@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
15a11c5b
MT
33292 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33293 s32 (*check_overtemp)(struct ixgbe_hw *);
66a7e928 33294 };
15a11c5b 33295+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
66a7e928
MT
33296
33297 struct ixgbe_eeprom_info {
33298- struct ixgbe_eeprom_operations ops;
15a11c5b 33299+ ixgbe_eeprom_operations_no_const ops;
66a7e928
MT
33300 enum ixgbe_eeprom_type type;
33301 u32 semaphore_delay;
33302 u16 word_size;
6e9df6a3 33303@@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
66a7e928
MT
33304
33305 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
33306 struct ixgbe_mac_info {
33307- struct ixgbe_mac_operations ops;
15a11c5b 33308+ ixgbe_mac_operations_no_const ops;
66a7e928
MT
33309 enum ixgbe_mac_type type;
33310 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33311 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
6e9df6a3 33312@@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
66a7e928
MT
33313 };
33314
33315 struct ixgbe_phy_info {
33316- struct ixgbe_phy_operations ops;
15a11c5b 33317+ ixgbe_phy_operations_no_const ops;
66a7e928
MT
33318 struct mdio_if_info mdio;
33319 enum ixgbe_phy_type type;
33320 u32 id;
6e9df6a3 33321@@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
15a11c5b
MT
33322 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
33323 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
66a7e928 33324 };
15a11c5b 33325+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
66a7e928 33326
15a11c5b
MT
33327 struct ixgbe_mbx_stats {
33328 u32 msgs_tx;
6e9df6a3 33329@@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
15a11c5b 33330 };
66a7e928 33331
15a11c5b
MT
33332 struct ixgbe_mbx_info {
33333- struct ixgbe_mbx_operations ops;
33334+ ixgbe_mbx_operations_no_const ops;
33335 struct ixgbe_mbx_stats stats;
33336 u32 timeout;
33337 u32 usec_delay;
fe2de317
MT
33338diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
33339index 10306b4..28df758 100644
33340--- a/drivers/net/ixgbevf/vf.h
33341+++ b/drivers/net/ixgbevf/vf.h
15a11c5b
MT
33342@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
33343 s32 (*clear_vfta)(struct ixgbe_hw *);
33344 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
66a7e928 33345 };
15a11c5b 33346+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
66a7e928 33347
15a11c5b
MT
33348 enum ixgbe_mac_type {
33349 ixgbe_mac_unknown = 0,
33350@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
66a7e928
MT
33351 };
33352
15a11c5b
MT
33353 struct ixgbe_mac_info {
33354- struct ixgbe_mac_operations ops;
33355+ ixgbe_mac_operations_no_const ops;
33356 u8 addr[6];
33357 u8 perm_addr[6];
33358
33359@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33360 s32 (*check_for_ack)(struct ixgbe_hw *);
33361 s32 (*check_for_rst)(struct ixgbe_hw *);
66a7e928 33362 };
15a11c5b
MT
33363+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33364
33365 struct ixgbe_mbx_stats {
33366 u32 msgs_tx;
33367@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
66a7e928
MT
33368 };
33369
15a11c5b
MT
33370 struct ixgbe_mbx_info {
33371- struct ixgbe_mbx_operations ops;
33372+ ixgbe_mbx_operations_no_const ops;
33373 struct ixgbe_mbx_stats stats;
33374 u32 timeout;
33375 u32 udelay;
fe2de317
MT
33376diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
33377index 27418d3..adf15bb 100644
33378--- a/drivers/net/ksz884x.c
33379+++ b/drivers/net/ksz884x.c
33380@@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
66a7e928
MT
33381 int rc;
33382 u64 counter[TOTAL_PORT_COUNTER_NUM];
33383
33384+ pax_track_stack();
33385+
33386 mutex_lock(&hw_priv->lock);
33387 n = SWITCH_PORT_NUM;
33388 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
fe2de317
MT
33389diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
33390index f0ee35d..3831c8a 100644
33391--- a/drivers/net/mlx4/main.c
33392+++ b/drivers/net/mlx4/main.c
66a7e928
MT
33393@@ -40,6 +40,7 @@
33394 #include <linux/dma-mapping.h>
33395 #include <linux/slab.h>
33396 #include <linux/io-mapping.h>
33397+#include <linux/sched.h>
33398
33399 #include <linux/mlx4/device.h>
33400 #include <linux/mlx4/doorbell.h>
fe2de317 33401@@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
66a7e928
MT
33402 u64 icm_size;
33403 int err;
33404
33405+ pax_track_stack();
33406+
33407 err = mlx4_QUERY_FW(dev);
33408 if (err) {
33409 if (err == -EACCES)
fe2de317
MT
33410diff --git a/drivers/net/niu.c b/drivers/net/niu.c
33411index ed47585..5e5be8f 100644
33412--- a/drivers/net/niu.c
33413+++ b/drivers/net/niu.c
33414@@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
66a7e928
MT
33415 int i, num_irqs, err;
33416 u8 first_ldg;
33417
33418+ pax_track_stack();
33419+
33420 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
33421 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
33422 ldg_num_map[i] = first_ldg + i;
fe2de317
MT
33423diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
33424index 80b6f36..5cd8938 100644
33425--- a/drivers/net/pcnet32.c
33426+++ b/drivers/net/pcnet32.c
15a11c5b
MT
33427@@ -270,7 +270,7 @@ struct pcnet32_private {
33428 struct sk_buff **rx_skbuff;
33429 dma_addr_t *tx_dma_addr;
33430 dma_addr_t *rx_dma_addr;
33431- struct pcnet32_access a;
33432+ struct pcnet32_access *a;
33433 spinlock_t lock; /* Guard lock */
33434 unsigned int cur_rx, cur_tx; /* The next free ring entry */
33435 unsigned int rx_ring_size; /* current rx ring size */
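
This pcnet32 hunk turns the embedded copy of the register-access method table into a pointer; the long run of mechanical lp->a. to lp->a-> conversions that follows, and the lp->a = *a to lp->a = a change in pcnet32_probe1(), are the fallout of that single structural change. The benefit, presumably, is that the shared pcnet32_access table no longer has to be copied into each device's writable private data. A sketch of the shape of the change, with abbreviated types:

struct pcnet32_access {
        unsigned short (*read_csr)(unsigned long ioaddr, int reg);
        void (*write_csr)(unsigned long ioaddr, int reg, unsigned short val);
};

/* before: a private, writable copy of the method table */
struct pcnet32_private_old {
        struct pcnet32_access a;
};

/* after: a pointer to the one shared (potentially read-only) table */
struct pcnet32_private_new {
        struct pcnet32_access *a;
};

/* probe:   lp->a = *a;           becomes   lp->a = a;
 * callers: lp->a.read_csr(...)   becomes   lp->a->read_csr(...)   */
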
fe2de317 33436@@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct net_device *dev)
15a11c5b
MT
33437 u16 val;
33438
33439 netif_wake_queue(dev);
33440- val = lp->a.read_csr(ioaddr, CSR3);
33441+ val = lp->a->read_csr(ioaddr, CSR3);
33442 val &= 0x00ff;
33443- lp->a.write_csr(ioaddr, CSR3, val);
33444+ lp->a->write_csr(ioaddr, CSR3, val);
33445 napi_enable(&lp->napi);
33446 }
33447
fe2de317 33448@@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
15a11c5b
MT
33449 r = mii_link_ok(&lp->mii_if);
33450 } else if (lp->chip_version >= PCNET32_79C970A) {
33451 ulong ioaddr = dev->base_addr; /* card base I/O address */
33452- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
33453+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
33454 } else { /* can not detect link on really old chips */
33455 r = 1;
33456 }
fe2de317 33457@@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
15a11c5b 33458 pcnet32_netif_stop(dev);
ae4e228f 33459
15a11c5b
MT
33460 spin_lock_irqsave(&lp->lock, flags);
33461- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33462+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
ae4e228f 33463
15a11c5b
MT
33464 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
33465
fe2de317 33466@@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
15a11c5b
MT
33467 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33468 {
33469 struct pcnet32_private *lp = netdev_priv(dev);
33470- struct pcnet32_access *a = &lp->a; /* access to registers */
33471+ struct pcnet32_access *a = lp->a; /* access to registers */
33472 ulong ioaddr = dev->base_addr; /* card base I/O address */
33473 struct sk_buff *skb; /* sk buff */
33474 int x, i; /* counters */
fe2de317 33475@@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
15a11c5b
MT
33476 pcnet32_netif_stop(dev);
33477
33478 spin_lock_irqsave(&lp->lock, flags);
33479- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33480+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33481
33482 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
33483
33484 /* Reset the PCNET32 */
33485- lp->a.reset(ioaddr);
33486- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33487+ lp->a->reset(ioaddr);
33488+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33489
33490 /* switch pcnet32 to 32bit mode */
33491- lp->a.write_bcr(ioaddr, 20, 2);
33492+ lp->a->write_bcr(ioaddr, 20, 2);
33493
33494 /* purge & init rings but don't actually restart */
33495 pcnet32_restart(dev, 0x0000);
33496
33497- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33498+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33499
33500 /* Initialize Transmit buffers. */
33501 size = data_len + 15;
fe2de317 33502@@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
15a11c5b
MT
33503
33504 /* set int loopback in CSR15 */
33505 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
33506- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
33507+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
33508
33509 teststatus = cpu_to_le16(0x8000);
33510- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33511+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33512
33513 /* Check status of descriptors */
33514 for (x = 0; x < numbuffs; x++) {
fe2de317 33515@@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
15a11c5b
MT
33516 }
33517 }
33518
33519- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33520+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33521 wmb();
33522 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
33523 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
33524@@ -1015,7 +1015,7 @@ clean_up:
33525 pcnet32_restart(dev, CSR0_NORMAL);
33526 } else {
33527 pcnet32_purge_rx_ring(dev);
33528- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33529+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33530 }
33531 spin_unlock_irqrestore(&lp->lock, flags);
33532
fe2de317 33533@@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct net_device *dev,
15a11c5b
MT
33534 enum ethtool_phys_id_state state)
33535 {
33536 struct pcnet32_private *lp = netdev_priv(dev);
33537- struct pcnet32_access *a = &lp->a;
33538+ struct pcnet32_access *a = lp->a;
33539 ulong ioaddr = dev->base_addr;
33540 unsigned long flags;
33541 int i;
fe2de317 33542@@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
15a11c5b
MT
33543 {
33544 int csr5;
33545 struct pcnet32_private *lp = netdev_priv(dev);
33546- struct pcnet32_access *a = &lp->a;
33547+ struct pcnet32_access *a = lp->a;
33548 ulong ioaddr = dev->base_addr;
33549 int ticks;
33550
fe2de317 33551@@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
15a11c5b
MT
33552 spin_lock_irqsave(&lp->lock, flags);
33553 if (pcnet32_tx(dev)) {
33554 /* reset the chip to clear the error condition, then restart */
33555- lp->a.reset(ioaddr);
33556- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33557+ lp->a->reset(ioaddr);
33558+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33559 pcnet32_restart(dev, CSR0_START);
33560 netif_wake_queue(dev);
33561 }
fe2de317 33562@@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
15a11c5b
MT
33563 __napi_complete(napi);
33564
33565 /* clear interrupt masks */
33566- val = lp->a.read_csr(ioaddr, CSR3);
33567+ val = lp->a->read_csr(ioaddr, CSR3);
33568 val &= 0x00ff;
33569- lp->a.write_csr(ioaddr, CSR3, val);
33570+ lp->a->write_csr(ioaddr, CSR3, val);
33571
33572 /* Set interrupt enable. */
33573- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
33574+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
33575
33576 spin_unlock_irqrestore(&lp->lock, flags);
33577 }
fe2de317 33578@@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
15a11c5b
MT
33579 int i, csr0;
33580 u16 *buff = ptr;
33581 struct pcnet32_private *lp = netdev_priv(dev);
33582- struct pcnet32_access *a = &lp->a;
33583+ struct pcnet32_access *a = lp->a;
33584 ulong ioaddr = dev->base_addr;
33585 unsigned long flags;
33586
fe2de317 33587@@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
15a11c5b
MT
33588 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
33589 if (lp->phymask & (1 << j)) {
33590 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
33591- lp->a.write_bcr(ioaddr, 33,
33592+ lp->a->write_bcr(ioaddr, 33,
33593 (j << 5) | i);
33594- *buff++ = lp->a.read_bcr(ioaddr, 34);
33595+ *buff++ = lp->a->read_bcr(ioaddr, 34);
33596 }
33597 }
33598 }
fe2de317 33599@@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
15a11c5b
MT
33600 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
33601 lp->options |= PCNET32_PORT_FD;
33602
33603- lp->a = *a;
33604+ lp->a = a;
33605
33606 /* prior to register_netdev, dev->name is not yet correct */
33607 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
fe2de317 33608@@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
15a11c5b
MT
33609 if (lp->mii) {
33610 /* lp->phycount and lp->phymask are set to 0 by memset above */
33611
33612- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33613+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33614 /* scan for PHYs */
33615 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
33616 unsigned short id1, id2;
fe2de317 33617@@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
15a11c5b
MT
33618 pr_info("Found PHY %04x:%04x at address %d\n",
33619 id1, id2, i);
33620 }
33621- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33622+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33623 if (lp->phycount > 1)
33624 lp->options |= PCNET32_PORT_MII;
33625 }
fe2de317 33626@@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_device *dev)
15a11c5b
MT
33627 }
33628
33629 /* Reset the PCNET32 */
33630- lp->a.reset(ioaddr);
33631+ lp->a->reset(ioaddr);
33632
33633 /* switch pcnet32 to 32bit mode */
33634- lp->a.write_bcr(ioaddr, 20, 2);
33635+ lp->a->write_bcr(ioaddr, 20, 2);
33636
33637 netif_printk(lp, ifup, KERN_DEBUG, dev,
33638 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
fe2de317 33639@@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_device *dev)
15a11c5b
MT
33640 (u32) (lp->init_dma_addr));
33641
33642 /* set/reset autoselect bit */
33643- val = lp->a.read_bcr(ioaddr, 2) & ~2;
33644+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
33645 if (lp->options & PCNET32_PORT_ASEL)
33646 val |= 2;
33647- lp->a.write_bcr(ioaddr, 2, val);
33648+ lp->a->write_bcr(ioaddr, 2, val);
33649
33650 /* handle full duplex setting */
33651 if (lp->mii_if.full_duplex) {
33652- val = lp->a.read_bcr(ioaddr, 9) & ~3;
33653+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
33654 if (lp->options & PCNET32_PORT_FD) {
33655 val |= 1;
33656 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
fe2de317 33657@@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_device *dev)
15a11c5b
MT
33658 if (lp->chip_version == 0x2627)
33659 val |= 3;
33660 }
33661- lp->a.write_bcr(ioaddr, 9, val);
33662+ lp->a->write_bcr(ioaddr, 9, val);
33663 }
33664
33665 /* set/reset GPSI bit in test register */
33666- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
33667+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
33668 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
33669 val |= 0x10;
33670- lp->a.write_csr(ioaddr, 124, val);
33671+ lp->a->write_csr(ioaddr, 124, val);
33672
33673 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
33674 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
fe2de317 33675@@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_device *dev)
15a11c5b
MT
33676 * duplex, and/or enable auto negotiation, and clear DANAS
33677 */
33678 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
33679- lp->a.write_bcr(ioaddr, 32,
33680- lp->a.read_bcr(ioaddr, 32) | 0x0080);
33681+ lp->a->write_bcr(ioaddr, 32,
33682+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
33683 /* disable Auto Negotiation, set 10Mpbs, HD */
33684- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
33685+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
33686 if (lp->options & PCNET32_PORT_FD)
33687 val |= 0x10;
33688 if (lp->options & PCNET32_PORT_100)
33689 val |= 0x08;
33690- lp->a.write_bcr(ioaddr, 32, val);
33691+ lp->a->write_bcr(ioaddr, 32, val);
33692 } else {
33693 if (lp->options & PCNET32_PORT_ASEL) {
33694- lp->a.write_bcr(ioaddr, 32,
33695- lp->a.read_bcr(ioaddr,
33696+ lp->a->write_bcr(ioaddr, 32,
33697+ lp->a->read_bcr(ioaddr,
33698 32) | 0x0080);
33699 /* enable auto negotiate, setup, disable fd */
33700- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
33701+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
33702 val |= 0x20;
33703- lp->a.write_bcr(ioaddr, 32, val);
33704+ lp->a->write_bcr(ioaddr, 32, val);
33705 }
33706 }
33707 } else {
fe2de317 33708@@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_device *dev)
15a11c5b
MT
33709 * There is really no good other way to handle multiple PHYs
33710 * other than turning off all automatics
33711 */
33712- val = lp->a.read_bcr(ioaddr, 2);
33713- lp->a.write_bcr(ioaddr, 2, val & ~2);
33714- val = lp->a.read_bcr(ioaddr, 32);
33715- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
33716+ val = lp->a->read_bcr(ioaddr, 2);
33717+ lp->a->write_bcr(ioaddr, 2, val & ~2);
33718+ val = lp->a->read_bcr(ioaddr, 32);
33719+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
33720
33721 if (!(lp->options & PCNET32_PORT_ASEL)) {
33722 /* setup ecmd */
fe2de317 33723@@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_device *dev)
15a11c5b
MT
33724 ethtool_cmd_speed_set(&ecmd,
33725 (lp->options & PCNET32_PORT_100) ?
33726 SPEED_100 : SPEED_10);
33727- bcr9 = lp->a.read_bcr(ioaddr, 9);
33728+ bcr9 = lp->a->read_bcr(ioaddr, 9);
33729
33730 if (lp->options & PCNET32_PORT_FD) {
33731 ecmd.duplex = DUPLEX_FULL;
fe2de317 33732@@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_device *dev)
15a11c5b
MT
33733 ecmd.duplex = DUPLEX_HALF;
33734 bcr9 |= ~(1 << 0);
33735 }
33736- lp->a.write_bcr(ioaddr, 9, bcr9);
33737+ lp->a->write_bcr(ioaddr, 9, bcr9);
33738 }
33739
33740 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
fe2de317 33741@@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_device *dev)
15a11c5b
MT
33742
33743 #ifdef DO_DXSUFLO
33744 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
33745- val = lp->a.read_csr(ioaddr, CSR3);
33746+ val = lp->a->read_csr(ioaddr, CSR3);
33747 val |= 0x40;
33748- lp->a.write_csr(ioaddr, CSR3, val);
33749+ lp->a->write_csr(ioaddr, CSR3, val);
33750 }
66a7e928 33751 #endif
66a7e928 33752
fe2de317 33753@@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_device *dev)
15a11c5b 33754 napi_enable(&lp->napi);
66a7e928 33755
15a11c5b
MT
33756 /* Re-initialize the PCNET32, and start it when done. */
33757- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
33758- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
33759+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
33760+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
66a7e928 33761
15a11c5b
MT
33762- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33763- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
33764+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33765+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
66a7e928 33766
15a11c5b 33767 netif_start_queue(dev);
66a7e928 33768
fe2de317 33769@@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_device *dev)
66a7e928 33770
15a11c5b
MT
33771 i = 0;
33772 while (i++ < 100)
33773- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
33774+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
33775 break;
33776 /*
33777 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
33778 * reports that doing so triggers a bug in the '974.
33779 */
33780- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
33781+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
66a7e928 33782
15a11c5b
MT
33783 netif_printk(lp, ifup, KERN_DEBUG, dev,
33784 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
33785 i,
33786 (u32) (lp->init_dma_addr),
33787- lp->a.read_csr(ioaddr, CSR0));
33788+ lp->a->read_csr(ioaddr, CSR0));
66a7e928 33789
15a11c5b 33790 spin_unlock_irqrestore(&lp->lock, flags);
66a7e928 33791
15a11c5b
MT
33792@@ -2218,7 +2218,7 @@ err_free_ring:
33793 * Switch back to 16bit mode to avoid problems with dumb
33794 * DOS packet driver after a warm reboot
33795 */
33796- lp->a.write_bcr(ioaddr, 20, 4);
33797+ lp->a->write_bcr(ioaddr, 20, 4);
66a7e928 33798
15a11c5b
MT
33799 err_free_irq:
33800 spin_unlock_irqrestore(&lp->lock, flags);
fe2de317 33801@@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
66a7e928 33802
15a11c5b
MT
33803 /* wait for stop */
33804 for (i = 0; i < 100; i++)
33805- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
33806+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
33807 break;
66a7e928 33808
15a11c5b 33809 if (i >= 100)
fe2de317 33810@@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
15a11c5b 33811 return;
66a7e928 33812
15a11c5b
MT
33813 /* ReInit Ring */
33814- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
33815+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
33816 i = 0;
33817 while (i++ < 1000)
33818- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
33819+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
33820 break;
66a7e928 33821
15a11c5b
MT
33822- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
33823+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
33824 }
33825
33826 static void pcnet32_tx_timeout(struct net_device *dev)
fe2de317 33827@@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
15a11c5b
MT
33828 /* Transmitter timeout, serious problems. */
33829 if (pcnet32_debug & NETIF_MSG_DRV)
33830 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
33831- dev->name, lp->a.read_csr(ioaddr, CSR0));
33832- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33833+ dev->name, lp->a->read_csr(ioaddr, CSR0));
33834+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33835 dev->stats.tx_errors++;
33836 if (netif_msg_tx_err(lp)) {
33837 int i;
fe2de317 33838@@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
15a11c5b
MT
33839
33840 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
33841 "%s() called, csr0 %4.4x\n",
33842- __func__, lp->a.read_csr(ioaddr, CSR0));
33843+ __func__, lp->a->read_csr(ioaddr, CSR0));
33844
33845 /* Default status -- will not enable Successful-TxDone
33846 * interrupt when that option is available to us.
fe2de317 33847@@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
15a11c5b
MT
33848 dev->stats.tx_bytes += skb->len;
33849
33850 /* Trigger an immediate send poll. */
33851- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
33852+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
33853
33854 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
33855 lp->tx_full = 1;
33856@@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
33857
33858 spin_lock(&lp->lock);
33859
33860- csr0 = lp->a.read_csr(ioaddr, CSR0);
33861+ csr0 = lp->a->read_csr(ioaddr, CSR0);
33862 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
33863 if (csr0 == 0xffff)
33864 break; /* PCMCIA remove happened */
33865 /* Acknowledge all of the current interrupt sources ASAP. */
33866- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
33867+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
33868
33869 netif_printk(lp, intr, KERN_DEBUG, dev,
33870 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
33871- csr0, lp->a.read_csr(ioaddr, CSR0));
33872+ csr0, lp->a->read_csr(ioaddr, CSR0));
33873
33874 /* Log misc errors. */
33875 if (csr0 & 0x4000)
33876@@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
33877 if (napi_schedule_prep(&lp->napi)) {
33878 u16 val;
33879 /* set interrupt masks */
33880- val = lp->a.read_csr(ioaddr, CSR3);
33881+ val = lp->a->read_csr(ioaddr, CSR3);
33882 val |= 0x5f00;
33883- lp->a.write_csr(ioaddr, CSR3, val);
33884+ lp->a->write_csr(ioaddr, CSR3, val);
33885
33886 __napi_schedule(&lp->napi);
33887 break;
33888 }
33889- csr0 = lp->a.read_csr(ioaddr, CSR0);
33890+ csr0 = lp->a->read_csr(ioaddr, CSR0);
33891 }
66a7e928 33892
15a11c5b
MT
33893 netif_printk(lp, intr, KERN_DEBUG, dev,
33894 "exiting interrupt, csr0=%#4.4x\n",
33895- lp->a.read_csr(ioaddr, CSR0));
33896+ lp->a->read_csr(ioaddr, CSR0));
66a7e928 33897
15a11c5b 33898 spin_unlock(&lp->lock);
66a7e928 33899
fe2de317 33900@@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_device *dev)
66a7e928 33901
15a11c5b 33902 spin_lock_irqsave(&lp->lock, flags);
66a7e928 33903
15a11c5b
MT
33904- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
33905+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
66a7e928 33906
15a11c5b
MT
33907 netif_printk(lp, ifdown, KERN_DEBUG, dev,
33908 "Shutting down ethercard, status was %2.2x\n",
33909- lp->a.read_csr(ioaddr, CSR0));
33910+ lp->a->read_csr(ioaddr, CSR0));
66a7e928 33911
15a11c5b
MT
33912 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
33913- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33914+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
66a7e928 33915
15a11c5b
MT
33916 /*
33917 * Switch back to 16bit mode to avoid problems with dumb
33918 * DOS packet driver after a warm reboot
33919 */
33920- lp->a.write_bcr(ioaddr, 20, 4);
33921+ lp->a->write_bcr(ioaddr, 20, 4);
33922
33923 spin_unlock_irqrestore(&lp->lock, flags);
33924
fe2de317 33925@@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
15a11c5b
MT
33926 unsigned long flags;
33927
33928 spin_lock_irqsave(&lp->lock, flags);
33929- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
33930+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
33931 spin_unlock_irqrestore(&lp->lock, flags);
33932
33933 return &dev->stats;
fe2de317 33934@@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
15a11c5b
MT
33935 if (dev->flags & IFF_ALLMULTI) {
33936 ib->filter[0] = cpu_to_le32(~0U);
33937 ib->filter[1] = cpu_to_le32(~0U);
33938- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
33939- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
33940- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
33941- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
33942+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
33943+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
33944+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
33945+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
33946 return;
33947 }
33948 /* clear the multicast filter */
fe2de317 33949@@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
15a11c5b
MT
33950 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
33951 }
33952 for (i = 0; i < 4; i++)
33953- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
33954+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
33955 le16_to_cpu(mcast_table[i]));
33956 }
33957
fe2de317 33958@@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
15a11c5b
MT
33959
33960 spin_lock_irqsave(&lp->lock, flags);
33961 suspended = pcnet32_suspend(dev, &flags, 0);
33962- csr15 = lp->a.read_csr(ioaddr, CSR15);
33963+ csr15 = lp->a->read_csr(ioaddr, CSR15);
33964 if (dev->flags & IFF_PROMISC) {
33965 /* Log any net taps. */
33966 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
33967 lp->init_block->mode =
33968 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
33969 7);
33970- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
33971+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
33972 } else {
33973 lp->init_block->mode =
33974 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
33975- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
33976+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
33977 pcnet32_load_multicast(dev);
33978 }
33979
33980 if (suspended) {
33981 int csr5;
33982 /* clear SUSPEND (SPND) - CSR5 bit 0 */
33983- csr5 = lp->a.read_csr(ioaddr, CSR5);
33984- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
33985+ csr5 = lp->a->read_csr(ioaddr, CSR5);
33986+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
33987 } else {
33988- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33989+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33990 pcnet32_restart(dev, CSR0_NORMAL);
33991 netif_wake_queue(dev);
33992 }
fe2de317 33993@@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
15a11c5b
MT
33994 if (!lp->mii)
33995 return 0;
33996
33997- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
33998- val_out = lp->a.read_bcr(ioaddr, 34);
33999+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34000+ val_out = lp->a->read_bcr(ioaddr, 34);
34001
34002 return val_out;
34003 }
fe2de317 34004@@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
15a11c5b
MT
34005 if (!lp->mii)
34006 return;
34007
34008- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34009- lp->a.write_bcr(ioaddr, 34, val);
34010+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34011+ lp->a->write_bcr(ioaddr, 34, val);
34012 }
34013
34014 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
fe2de317 34015@@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
15a11c5b
MT
34016 curr_link = mii_link_ok(&lp->mii_if);
34017 } else {
34018 ulong ioaddr = dev->base_addr; /* card base I/O address */
34019- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34020+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34021 }
34022 if (!curr_link) {
34023 if (prev_link || verbose) {
fe2de317 34024@@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
15a11c5b
MT
34025 (ecmd.duplex == DUPLEX_FULL)
34026 ? "full" : "half");
34027 }
34028- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34029+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34030 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34031 if (lp->mii_if.full_duplex)
34032 bcr9 |= (1 << 0);
34033 else
34034 bcr9 &= ~(1 << 0);
34035- lp->a.write_bcr(dev->base_addr, 9, bcr9);
34036+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
34037 }
34038 } else {
34039 netif_info(lp, link, dev, "link up\n");
fe2de317
MT
34040diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
34041index edfa15d..002bfa9 100644
34042--- a/drivers/net/ppp_generic.c
34043+++ b/drivers/net/ppp_generic.c
34044@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
15a11c5b
MT
34045 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34046 struct ppp_stats stats;
34047 struct ppp_comp_stats cstats;
34048- char *vers;
34049
34050 switch (cmd) {
34051 case SIOCGPPPSTATS:
fe2de317 34052@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
15a11c5b 34053 break;
66a7e928 34054
15a11c5b
MT
34055 case SIOCGPPPVER:
34056- vers = PPP_VERSION;
34057- if (copy_to_user(addr, vers, strlen(vers) + 1))
34058+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34059 break;
34060 err = 0;
34061 break;
fe2de317
MT
34062diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
34063index 6d657ca..d1be94b 100644
34064--- a/drivers/net/r8169.c
34065+++ b/drivers/net/r8169.c
6e9df6a3 34066@@ -663,12 +663,12 @@ struct rtl8169_private {
15a11c5b
MT
34067 struct mdio_ops {
34068 void (*write)(void __iomem *, int, int);
34069 int (*read)(void __iomem *, int);
34070- } mdio_ops;
34071+ } __no_const mdio_ops;
34072
34073 struct pll_power_ops {
34074 void (*down)(struct rtl8169_private *);
34075 void (*up)(struct rtl8169_private *);
34076- } pll_power_ops;
34077+ } __no_const pll_power_ops;
34078
34079 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34080 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
fe2de317
MT
34081diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
34082index 3c0f131..17f8b02 100644
34083--- a/drivers/net/sis190.c
34084+++ b/drivers/net/sis190.c
34085@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
6e9df6a3
MT
34086 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34087 struct net_device *dev)
34088 {
34089- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34090+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34091 struct sis190_private *tp = netdev_priv(dev);
34092 struct pci_dev *isa_bridge;
34093 u8 reg, tmp8;
fe2de317
MT
34094diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
34095index 4793df8..44c9849 100644
34096--- a/drivers/net/sundance.c
34097+++ b/drivers/net/sundance.c
6e9df6a3
MT
34098@@ -218,7 +218,7 @@ enum {
34099 struct pci_id_info {
34100 const char *name;
34101 };
34102-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34103+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34104 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34105 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34106 {"D-Link DFE-580TX 4 port Server Adapter"},
fe2de317
MT
34107diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
34108index 2ea456d..3ad9523 100644
34109--- a/drivers/net/tg3.h
34110+++ b/drivers/net/tg3.h
15a11c5b 34111@@ -134,6 +134,7 @@
58c5fc13
MT
34112 #define CHIPREV_ID_5750_A0 0x4000
34113 #define CHIPREV_ID_5750_A1 0x4001
34114 #define CHIPREV_ID_5750_A3 0x4003
34115+#define CHIPREV_ID_5750_C1 0x4201
34116 #define CHIPREV_ID_5750_C2 0x4202
34117 #define CHIPREV_ID_5752_A0_HW 0x5000
34118 #define CHIPREV_ID_5752_A0 0x6000
fe2de317
MT
34119diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34120index 515f122..41dd273 100644
34121--- a/drivers/net/tokenring/abyss.c
34122+++ b/drivers/net/tokenring/abyss.c
34123@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
15a11c5b
MT
34124
34125 static int __init abyss_init (void)
34126 {
34127- abyss_netdev_ops = tms380tr_netdev_ops;
34128+ pax_open_kernel();
34129+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34130
34131- abyss_netdev_ops.ndo_open = abyss_open;
34132- abyss_netdev_ops.ndo_stop = abyss_close;
34133+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34134+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34135+ pax_close_kernel();
34136
34137 return pci_register_driver(&abyss_driver);
34138 }
fe2de317
MT
34139diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34140index 6153cfd..cf69c1c 100644
34141--- a/drivers/net/tokenring/madgemc.c
34142+++ b/drivers/net/tokenring/madgemc.c
34143@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
15a11c5b
MT
34144
34145 static int __init madgemc_init (void)
34146 {
34147- madgemc_netdev_ops = tms380tr_netdev_ops;
34148- madgemc_netdev_ops.ndo_open = madgemc_open;
34149- madgemc_netdev_ops.ndo_stop = madgemc_close;
34150+ pax_open_kernel();
34151+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34152+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34153+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34154+ pax_close_kernel();
34155
34156 return mca_register_driver (&madgemc_driver);
34157 }
fe2de317
MT
34158diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34159index 8d362e6..f91cc52 100644
34160--- a/drivers/net/tokenring/proteon.c
34161+++ b/drivers/net/tokenring/proteon.c
15a11c5b
MT
34162@@ -353,9 +353,11 @@ static int __init proteon_init(void)
34163 struct platform_device *pdev;
34164 int i, num = 0, err = 0;
34165
34166- proteon_netdev_ops = tms380tr_netdev_ops;
34167- proteon_netdev_ops.ndo_open = proteon_open;
34168- proteon_netdev_ops.ndo_stop = tms380tr_close;
34169+ pax_open_kernel();
34170+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34171+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34172+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34173+ pax_close_kernel();
34174
34175 err = platform_driver_register(&proteon_driver);
34176 if (err)
fe2de317
MT
34177diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34178index 46db5c5..37c1536 100644
34179--- a/drivers/net/tokenring/skisa.c
34180+++ b/drivers/net/tokenring/skisa.c
15a11c5b
MT
34181@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34182 struct platform_device *pdev;
34183 int i, num = 0, err = 0;
34184
34185- sk_isa_netdev_ops = tms380tr_netdev_ops;
34186- sk_isa_netdev_ops.ndo_open = sk_isa_open;
34187- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34188+ pax_open_kernel();
34189+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34190+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34191+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34192+ pax_close_kernel();
34193
34194 err = platform_driver_register(&sk_isa_driver);
34195 if (err)
fe2de317
MT
34196diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
34197index ce90efc..2676f89 100644
34198--- a/drivers/net/tulip/de2104x.c
34199+++ b/drivers/net/tulip/de2104x.c
34200@@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
66a7e928
MT
34201 struct de_srom_info_leaf *il;
34202 void *bufp;
34203
34204+ pax_track_stack();
34205+
34206 /* download entire eeprom */
34207 for (i = 0; i < DE_EEPROM_WORDS; i++)
34208 ((__le16 *)ee_data)[i] =
fe2de317
MT
34209diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
34210index 959b410..c97fac2 100644
34211--- a/drivers/net/tulip/de4x5.c
34212+++ b/drivers/net/tulip/de4x5.c
34213@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ae4e228f
MT
34214 for (i=0; i<ETH_ALEN; i++) {
34215 tmp.addr[i] = dev->dev_addr[i];
34216 }
34217- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
bc901d79 34218+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
ae4e228f
MT
34219 break;
34220
34221 case DE4X5_SET_HWADDR: /* Set the hardware address */
fe2de317 34222@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ae4e228f
MT
34223 spin_lock_irqsave(&lp->lock, flags);
34224 memcpy(&statbuf, &lp->pktStats, ioc->len);
34225 spin_unlock_irqrestore(&lp->lock, flags);
34226- if (copy_to_user(ioc->data, &statbuf, ioc->len))
bc901d79 34227+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
ae4e228f
MT
34228 return -EFAULT;
34229 break;
34230 }
fe2de317
MT
34231diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
34232index fa5eee9..e074432 100644
34233--- a/drivers/net/tulip/eeprom.c
34234+++ b/drivers/net/tulip/eeprom.c
34235@@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
6e9df6a3
MT
34236 {NULL}};
34237
34238
34239-static const char *block_name[] __devinitdata = {
34240+static const char *block_name[] __devinitconst = {
34241 "21140 non-MII",
34242 "21140 MII PHY",
34243 "21142 Serial PHY",
fe2de317
MT
34244diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
34245index 862eadf..3eee1e6 100644
34246--- a/drivers/net/tulip/winbond-840.c
34247+++ b/drivers/net/tulip/winbond-840.c
6e9df6a3
MT
34248@@ -236,7 +236,7 @@ struct pci_id_info {
34249 int drv_flags; /* Driver use, intended as capability flags. */
34250 };
34251
34252-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34253+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34254 { /* Sometime a Level-One switch card. */
34255 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34256 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
fe2de317
MT
34257diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34258index 304fe78..db112fa 100644
34259--- a/drivers/net/usb/hso.c
34260+++ b/drivers/net/usb/hso.c
c52201e0
MT
34261@@ -71,7 +71,7 @@
34262 #include <asm/byteorder.h>
34263 #include <linux/serial_core.h>
34264 #include <linux/serial.h>
34265-
34266+#include <asm/local.h>
34267
34268 #define MOD_AUTHOR "Option Wireless"
34269 #define MOD_DESCRIPTION "USB High Speed Option driver"
6892158b 34270@@ -257,7 +257,7 @@ struct hso_serial {
58c5fc13
MT
34271
34272 /* from usb_serial_port */
34273 struct tty_struct *tty;
34274- int open_count;
c52201e0 34275+ local_t open_count;
58c5fc13
MT
34276 spinlock_t serial_lock;
34277
34278 int (*write_data) (struct hso_serial *serial);
fe2de317 34279@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
58c5fc13
MT
34280 struct urb *urb;
34281
34282 urb = serial->rx_urb[0];
34283- if (serial->open_count > 0) {
c52201e0 34284+ if (local_read(&serial->open_count) > 0) {
58c5fc13
MT
34285 count = put_rxbuf_data(urb, serial);
34286 if (count == -1)
34287 return;
fe2de317 34288@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
58c5fc13
MT
34289 DUMP1(urb->transfer_buffer, urb->actual_length);
34290
34291 /* Anyone listening? */
34292- if (serial->open_count == 0)
c52201e0 34293+ if (local_read(&serial->open_count) == 0)
58c5fc13
MT
34294 return;
34295
34296 if (status == 0) {
fe2de317 34297@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
34298 spin_unlock_irq(&serial->serial_lock);
34299
34300 /* check for port already opened, if not set the termios */
34301- serial->open_count++;
34302- if (serial->open_count == 1) {
c52201e0 34303+ if (local_inc_return(&serial->open_count) == 1) {
58c5fc13
MT
34304 serial->rx_state = RX_IDLE;
34305 /* Force default termio settings */
57199397 34306 _hso_serial_set_termios(tty, NULL);
fe2de317 34307@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
34308 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34309 if (result) {
34310 hso_stop_serial_device(serial->parent);
34311- serial->open_count--;
c52201e0 34312+ local_dec(&serial->open_count);
58c5fc13
MT
34313 kref_put(&serial->parent->ref, hso_serial_ref_free);
34314 }
34315 } else {
fe2de317 34316@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
34317
34318 /* reset the rts and dtr */
34319 /* do the actual close */
34320- serial->open_count--;
c52201e0 34321+ local_dec(&serial->open_count);
ae4e228f 34322
58c5fc13
MT
34323- if (serial->open_count <= 0) {
34324- serial->open_count = 0;
c52201e0
MT
34325+ if (local_read(&serial->open_count) <= 0) {
34326+ local_set(&serial->open_count, 0);
58c5fc13
MT
34327 spin_lock_irq(&serial->serial_lock);
34328 if (serial->tty == tty) {
34329 serial->tty->driver_data = NULL;
fe2de317 34330@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
58c5fc13
MT
34331
34332 /* the actual setup */
34333 spin_lock_irqsave(&serial->serial_lock, flags);
34334- if (serial->open_count)
c52201e0 34335+ if (local_read(&serial->open_count))
58c5fc13
MT
34336 _hso_serial_set_termios(tty, old);
34337 else
34338 tty->termios = old;
fe2de317 34339@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
ae4e228f
MT
34340 D1("Pending read interrupt on port %d\n", i);
34341 spin_lock(&serial->serial_lock);
34342 if (serial->rx_state == RX_IDLE &&
34343- serial->open_count > 0) {
c52201e0 34344+ local_read(&serial->open_count) > 0) {
ae4e228f
MT
34345 /* Setup and send a ctrl req read on
34346 * port i */
34347 if (!serial->rx_urb_filled[0]) {
fe2de317 34348@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
58c5fc13
MT
34349 /* Start all serial ports */
34350 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34351 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34352- if (dev2ser(serial_table[i])->open_count) {
c52201e0 34353+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
58c5fc13
MT
34354 result =
34355 hso_start_serial_device(serial_table[i], GFP_NOIO);
34356 hso_kick_transmit(dev2ser(serial_table[i]));
fe2de317
MT
34357diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34358index 27400ed..c796e05 100644
34359--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
34360+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34361@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
71d190be
MT
34362 * Return with error code if any of the queue indices
34363 * is out of range
34364 */
34365- if (p->ring_index[i] < 0 ||
34366- p->ring_index[i] >= adapter->num_rx_queues)
34367+ if (p->ring_index[i] >= adapter->num_rx_queues)
34368 return -EINVAL;
34369 }
34370
fe2de317
MT
34371diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
34372index dd36258..e47fd31 100644
34373--- a/drivers/net/vxge/vxge-config.h
34374+++ b/drivers/net/vxge/vxge-config.h
6e9df6a3 34375@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
15a11c5b
MT
34376 void (*link_down)(struct __vxge_hw_device *devh);
34377 void (*crit_err)(struct __vxge_hw_device *devh,
34378 enum vxge_hw_event type, u64 ext_data);
34379-};
34380+} __no_const;
66a7e928 34381
15a11c5b
MT
34382 /*
34383 * struct __vxge_hw_blockpool_entry - Block private data structure
fe2de317
MT
34384diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
34385index 178348a2..18bb433 100644
34386--- a/drivers/net/vxge/vxge-main.c
34387+++ b/drivers/net/vxge/vxge-main.c
34388@@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
66a7e928
MT
34389 struct sk_buff *completed[NR_SKB_COMPLETED];
34390 int more;
34391
34392+ pax_track_stack();
34393+
34394 do {
34395 more = 0;
34396 skb_ptr = completed;
fe2de317 34397@@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
66a7e928
MT
34398 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34399 int index;
34400
34401+ pax_track_stack();
34402+
34403 /*
34404 * Filling
34405 * - itable with bucket numbers
fe2de317
MT
34406diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
34407index 4a518a3..936b334 100644
34408--- a/drivers/net/vxge/vxge-traffic.h
34409+++ b/drivers/net/vxge/vxge-traffic.h
15a11c5b
MT
34410@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34411 struct vxge_hw_mempool_dma *dma_object,
34412 u32 index,
34413 u32 is_last);
34414-};
34415+} __no_const;
34416
34417 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34418 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
fe2de317
MT
34419diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
34420index 56aeb01..547f71f 100644
34421--- a/drivers/net/wan/hdlc_x25.c
34422+++ b/drivers/net/wan/hdlc_x25.c
34423@@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
15a11c5b
MT
34424
34425 static int x25_open(struct net_device *dev)
34426 {
34427- struct lapb_register_struct cb;
34428+ static struct lapb_register_struct cb = {
34429+ .connect_confirmation = x25_connected,
34430+ .connect_indication = x25_connected,
34431+ .disconnect_confirmation = x25_disconnected,
34432+ .disconnect_indication = x25_disconnected,
34433+ .data_indication = x25_data_indication,
34434+ .data_transmit = x25_data_transmit
34435+ };
34436 int result;
34437
34438- cb.connect_confirmation = x25_connected;
34439- cb.connect_indication = x25_connected;
34440- cb.disconnect_confirmation = x25_disconnected;
34441- cb.disconnect_indication = x25_disconnected;
34442- cb.data_indication = x25_data_indication;
34443- cb.data_transmit = x25_data_transmit;
34444-
34445 result = lapb_register(dev, &cb);
34446 if (result != LAPB_OK)
34447 return result;
fe2de317
MT
34448diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
34449index 1fda46c..f2858f2 100644
34450--- a/drivers/net/wimax/i2400m/usb-fw.c
34451+++ b/drivers/net/wimax/i2400m/usb-fw.c
34452@@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
66a7e928
MT
34453 int do_autopm = 1;
34454 DECLARE_COMPLETION_ONSTACK(notif_completion);
34455
34456+ pax_track_stack();
34457+
34458 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34459 i2400m, ack, ack_size);
34460 BUG_ON(_ack == i2400m->bm_ack_buf);
fe2de317
MT
34461diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
34462index e1b3e3c..e413f18 100644
34463--- a/drivers/net/wireless/airo.c
34464+++ b/drivers/net/wireless/airo.c
34465@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
66a7e928
MT
34466 BSSListElement * loop_net;
34467 BSSListElement * tmp_net;
34468
34469+ pax_track_stack();
34470+
34471 /* Blow away current list of scan results */
34472 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34473 list_move_tail (&loop_net->list, &ai->network_free_list);
fe2de317 34474@@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
66a7e928
MT
34475 WepKeyRid wkr;
34476 int rc;
34477
34478+ pax_track_stack();
34479+
34480 memset( &mySsid, 0, sizeof( mySsid ) );
34481 kfree (ai->flash);
34482 ai->flash = NULL;
fe2de317 34483@@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct inode *inode,
66a7e928
MT
34484 __le32 *vals = stats.vals;
34485 int len;
34486
34487+ pax_track_stack();
34488+
34489 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34490 return -ENOMEM;
34491 data = file->private_data;
fe2de317 34492@@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
66a7e928
MT
34493 /* If doLoseSync is not 1, we won't do a Lose Sync */
34494 int doLoseSync = -1;
34495
34496+ pax_track_stack();
34497+
34498 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34499 return -ENOMEM;
34500 data = file->private_data;
fe2de317 34501@@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_device *dev,
66a7e928
MT
34502 int i;
34503 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
34504
34505+ pax_track_stack();
34506+
34507 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
34508 if (!qual)
34509 return -ENOMEM;
fe2de317 34510@@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
66a7e928
MT
34511 CapabilityRid cap_rid;
34512 __le32 *vals = stats_rid.vals;
34513
34514+ pax_track_stack();
34515+
34516 /* Get stats out of the card */
34517 clear_bit(JOB_WSTATS, &local->jobs);
34518 if (local->power.event) {
fe2de317
MT
34519diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34520index 17c4b56..00d836f 100644
34521--- a/drivers/net/wireless/ath/ath.h
34522+++ b/drivers/net/wireless/ath/ath.h
34523@@ -121,6 +121,7 @@ struct ath_ops {
34524 void (*write_flush) (void *);
34525 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34526 };
34527+typedef struct ath_ops __no_const ath_ops_no_const;
34528
34529 struct ath_common;
34530 struct ath_bus_ops;
34531diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
34532index ccca724..7afbadc 100644
34533--- a/drivers/net/wireless/ath/ath5k/debug.c
34534+++ b/drivers/net/wireless/ath/ath5k/debug.c
34535@@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
66a7e928
MT
34536 unsigned int v;
34537 u64 tsf;
34538
34539+ pax_track_stack();
34540+
6e9df6a3
MT
34541 v = ath5k_hw_reg_read(ah, AR5K_BEACON);
34542 len += snprintf(buf + len, sizeof(buf) - len,
66a7e928 34543 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
fe2de317 34544@@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
66a7e928
MT
34545 unsigned int len = 0;
34546 unsigned int i;
34547
34548+ pax_track_stack();
34549+
6e9df6a3
MT
34550 len += snprintf(buf + len, sizeof(buf) - len,
34551 "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
66a7e928 34552
fe2de317 34553@@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
66a7e928 34554 unsigned int len = 0;
6e9df6a3 34555 u32 filt = ath5k_hw_get_rx_filter(ah);
66a7e928
MT
34556
34557+ pax_track_stack();
34558+
6e9df6a3
MT
34559 len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
34560 ah->bssidmask);
34561 len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
fe2de317 34562@@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
66a7e928
MT
34563 unsigned int len = 0;
34564 int i;
34565
34566+ pax_track_stack();
34567+
6e9df6a3 34568 len += snprintf(buf + len, sizeof(buf) - len,
66a7e928 34569 "RX\n---------------------\n");
6e9df6a3 34570 len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
fe2de317 34571@@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
66a7e928
MT
34572 char buf[700];
34573 unsigned int len = 0;
34574
34575+ pax_track_stack();
34576+
6e9df6a3 34577 len += snprintf(buf + len, sizeof(buf) - len,
66a7e928 34578 "HW has PHY error counters:\t%s\n",
6e9df6a3 34579 ah->ah_capabilities.cap_has_phyerr_counters ?
fe2de317 34580@@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
66a7e928
MT
34581 struct ath5k_buf *bf, *bf0;
34582 int i, n;
34583
34584+ pax_track_stack();
34585+
6e9df6a3
MT
34586 len += snprintf(buf + len, sizeof(buf) - len,
34587 "available txbuffers: %d\n", ah->txbuf_len);
66a7e928 34588
fe2de317
MT
34589diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34590index 7c2aaad..ad14dee 100644
34591--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34592+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34593@@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
15a11c5b 34594 int i, im, j;
66a7e928
MT
34595 int nmeasurement;
34596
34597+ pax_track_stack();
34598+
34599 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
34600 if (ah->txchainmask & (1 << i))
34601 num_chains++;
fe2de317
MT
34602diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34603index f80d1d6..08b773d 100644
34604--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34605+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34606@@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
66a7e928
MT
34607 int theta_low_bin = 0;
34608 int i;
34609
34610+ pax_track_stack();
34611+
34612 /* disregard any bin that contains <= 16 samples */
34613 thresh_accum_cnt = 16;
34614 scale_factor = 5;
fe2de317
MT
34615diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
34616index d1eb896..8b67cd4 100644
34617--- a/drivers/net/wireless/ath/ath9k/debug.c
34618+++ b/drivers/net/wireless/ath/ath9k/debug.c
34619@@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
66a7e928
MT
34620 char buf[512];
34621 unsigned int len = 0;
34622
34623+ pax_track_stack();
34624+
34625 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
34626 len += snprintf(buf + len, sizeof(buf) - len,
34627 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
fe2de317 34628@@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
66a7e928
MT
34629 u8 addr[ETH_ALEN];
34630 u32 tmp;
34631
34632+ pax_track_stack();
34633+
34634 len += snprintf(buf + len, sizeof(buf) - len,
34635 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
34636 wiphy_name(sc->hw->wiphy),
fe2de317
MT
34637diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34638index d3ff33c..309398e 100644
34639--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34640+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34641@@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
15a11c5b
MT
34642 unsigned int len = 0;
34643 int ret = 0;
34644
34645+ pax_track_stack();
34646+
34647 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34648
34649 ath9k_htc_ps_wakeup(priv);
fe2de317 34650@@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
66a7e928
MT
34651 unsigned int len = 0;
34652 int ret = 0;
34653
34654+ pax_track_stack();
34655+
34656 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34657
15a11c5b 34658 ath9k_htc_ps_wakeup(priv);
fe2de317 34659@@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
15a11c5b
MT
34660 unsigned int len = 0;
34661 int ret = 0;
34662
34663+ pax_track_stack();
34664+
34665 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34666
34667 ath9k_htc_ps_wakeup(priv);
fe2de317 34668@@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
66a7e928
MT
34669 char buf[512];
34670 unsigned int len = 0;
34671
34672+ pax_track_stack();
34673+
34674 len += snprintf(buf + len, sizeof(buf) - len,
34675 "%20s : %10u\n", "Buffers queued",
34676 priv->debug.tx_stats.buf_queued);
fe2de317 34677@@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
66a7e928
MT
34678 char buf[512];
34679 unsigned int len = 0;
34680
34681+ pax_track_stack();
34682+
15a11c5b
MT
34683 spin_lock_bh(&priv->tx.tx_lock);
34684
34685 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
fe2de317 34686@@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
15a11c5b
MT
34687 char buf[512];
34688 unsigned int len = 0;
34689
34690+ pax_track_stack();
34691+
34692 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
34693 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
34694
fe2de317
MT
34695diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34696index c798890..c19a8fb 100644
34697--- a/drivers/net/wireless/ath/ath9k/hw.h
34698+++ b/drivers/net/wireless/ath/ath9k/hw.h
6e9df6a3 34699@@ -588,7 +588,7 @@ struct ath_hw_private_ops {
15a11c5b
MT
34700
34701 /* ANI */
34702 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34703-};
34704+} __no_const;
34705
34706 /**
34707 * struct ath_hw_ops - callbacks used by hardware code and driver code
6e9df6a3 34708@@ -639,7 +639,7 @@ struct ath_hw_ops {
15a11c5b
MT
34709 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
34710 struct ath_hw_antcomb_conf *antconf);
34711
34712-};
34713+} __no_const;
34714
34715 struct ath_nf_limits {
34716 s16 max;
6e9df6a3 34717@@ -652,7 +652,7 @@ struct ath_nf_limits {
15a11c5b
MT
34718 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
34719
34720 struct ath_hw {
34721- struct ath_ops reg_ops;
34722+ ath_ops_no_const reg_ops;
34723
34724 struct ieee80211_hw *hw;
34725 struct ath_common common;
fe2de317
MT
34726diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
34727index ef9ad79..f5f8d80 100644
34728--- a/drivers/net/wireless/ipw2x00/ipw2100.c
34729+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
34730@@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
66a7e928
MT
34731 int err;
34732 DECLARE_SSID_BUF(ssid);
34733
34734+ pax_track_stack();
34735+
34736 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
34737
34738 if (ssid_len)
fe2de317 34739@@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
66a7e928
MT
34740 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
34741 int err;
34742
34743+ pax_track_stack();
34744+
34745 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
34746 idx, keylen, len);
34747
fe2de317
MT
34748diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
34749index 32a9966..de69787 100644
34750--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
34751+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
34752@@ -1565,6 +1565,8 @@ static void libipw_process_probe_response(struct libipw_device
66a7e928
MT
34753 unsigned long flags;
34754 DECLARE_SSID_BUF(ssid);
34755
34756+ pax_track_stack();
34757+
34758 LIBIPW_DEBUG_SCAN("'%s' (%pM"
34759 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
34760 print_ssid(ssid, info_element->data, info_element->len),
fe2de317
MT
34761diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34762index 66ee1562..b90412b 100644
34763--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
34764+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34765@@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
15a11c5b
MT
34766 */
34767 if (iwl3945_mod_params.disable_hw_scan) {
34768 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
34769- iwl3945_hw_ops.hw_scan = NULL;
34770+ pax_open_kernel();
34771+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
34772+ pax_close_kernel();
66a7e928 34773 }
66a7e928 34774
15a11c5b 34775 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
fe2de317
MT
34776diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34777index 3789ff4..22ab151 100644
34778--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34779+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34780@@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
66a7e928
MT
34781 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
34782 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
34783
34784+ pax_track_stack();
34785+
34786 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
34787
34788 /* Treat uninitialized rate scaling data same as non-existing. */
fe2de317 34789@@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
66a7e928
MT
34790 container_of(lq_sta, struct iwl_station_priv, lq_sta);
34791 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
34792
34793+ pax_track_stack();
34794+
34795 /* Override starting rate (index 0) if needed for debug purposes */
34796 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
34797
fe2de317
MT
34798diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
34799index f9a407e..a6f2bb7 100644
34800--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
34801+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
34802@@ -68,8 +68,8 @@ do { \
34803 } while (0)
34804
34805 #else
34806-#define IWL_DEBUG(__priv, level, fmt, args...)
34807-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
34808+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
34809+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
34810 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
34811 const void *p, u32 len)
34812 {}
34813diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34814index ec1485b..900c3bd 100644
34815--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34816+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34817@@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
66a7e928
MT
34818 int pos = 0;
34819 const size_t bufsz = sizeof(buf);
34820
34821+ pax_track_stack();
34822+
34823 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
34824 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
34825 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
fe2de317 34826@@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
66a7e928
MT
34827 char buf[256 * NUM_IWL_RXON_CTX];
34828 const size_t bufsz = sizeof(buf);
34829
34830+ pax_track_stack();
34831+
34832 for_each_context(priv, ctx) {
34833 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
34834 ctx->ctxid);
fe2de317
MT
34835diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
34836index 0a0cc96..fd49ad8 100644
34837--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
34838+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
34839@@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
66a7e928
MT
34840 int buf_len = 512;
34841 size_t len = 0;
34842
34843+ pax_track_stack();
34844+
34845 if (*ppos != 0)
34846 return 0;
34847 if (count < sizeof(buf))
fe2de317
MT
34848diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
34849index 031cd89..bdc8435 100644
34850--- a/drivers/net/wireless/mac80211_hwsim.c
34851+++ b/drivers/net/wireless/mac80211_hwsim.c
34852@@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(void)
15a11c5b 34853 return -EINVAL;
66a7e928 34854
15a11c5b
MT
34855 if (fake_hw_scan) {
34856- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34857- mac80211_hwsim_ops.sw_scan_start = NULL;
34858- mac80211_hwsim_ops.sw_scan_complete = NULL;
34859+ pax_open_kernel();
34860+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34861+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
34862+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
34863+ pax_close_kernel();
34864 }
ae4e228f 34865
15a11c5b 34866 spin_lock_init(&hwsim_radio_lock);
fe2de317
MT
34867diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
34868index 2215c3c..64e6a47 100644
34869--- a/drivers/net/wireless/mwifiex/main.h
34870+++ b/drivers/net/wireless/mwifiex/main.h
6e9df6a3
MT
34871@@ -560,7 +560,7 @@ struct mwifiex_if_ops {
34872
34873 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
34874 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
34875-};
34876+} __no_const;
34877
34878 struct mwifiex_adapter {
34879 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
fe2de317
MT
34880diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
34881index 29f9389..f6d2ce0 100644
34882--- a/drivers/net/wireless/rndis_wlan.c
34883+++ b/drivers/net/wireless/rndis_wlan.c
34884@@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
df50ba0c
MT
34885
34886 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
34887
34888- if (rts_threshold < 0 || rts_threshold > 2347)
34889+ if (rts_threshold > 2347)
34890 rts_threshold = 2347;
34891
34892 tmp = cpu_to_le32(rts_threshold);
fe2de317
MT
34893diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34894index 3b11642..d6bb049 100644
34895--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34896+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34897@@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
66a7e928
MT
34898 u8 rfpath;
34899 u8 num_total_rfpath = rtlphy->num_total_rfpath;
34900
34901+ pax_track_stack();
34902+
34903 precommoncmdcnt = 0;
34904 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
34905 MAX_PRECMD_CNT,
fe2de317
MT
34906diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
34907index a77f1bb..c608b2b 100644
34908--- a/drivers/net/wireless/wl1251/wl1251.h
34909+++ b/drivers/net/wireless/wl1251/wl1251.h
15a11c5b
MT
34910@@ -266,7 +266,7 @@ struct wl1251_if_operations {
34911 void (*reset)(struct wl1251 *wl);
34912 void (*enable_irq)(struct wl1251 *wl);
34913 void (*disable_irq)(struct wl1251 *wl);
34914-};
34915+} __no_const;
34916
34917 struct wl1251 {
34918 struct ieee80211_hw *hw;
fe2de317
MT
34919diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
34920index e0b3736..4b466e6 100644
34921--- a/drivers/net/wireless/wl12xx/spi.c
34922+++ b/drivers/net/wireless/wl12xx/spi.c
34923@@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
66a7e928
MT
34924 u32 chunk_len;
34925 int i;
34926
34927+ pax_track_stack();
34928+
34929 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
34930
34931 spi_message_init(&m);
fe2de317
MT
34932diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
34933index f34b5b2..b5abb9f 100644
34934--- a/drivers/oprofile/buffer_sync.c
34935+++ b/drivers/oprofile/buffer_sync.c
34936@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
58c5fc13
MT
34937 if (cookie == NO_COOKIE)
34938 offset = pc;
34939 if (cookie == INVALID_COOKIE) {
34940- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34941+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34942 offset = pc;
34943 }
34944 if (cookie != last_cookie) {
fe2de317 34945@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
58c5fc13
MT
34946 /* add userspace sample */
34947
34948 if (!mm) {
34949- atomic_inc(&oprofile_stats.sample_lost_no_mm);
34950+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
34951 return 0;
34952 }
34953
34954 cookie = lookup_dcookie(mm, s->eip, &offset);
34955
34956 if (cookie == INVALID_COOKIE) {
34957- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34958+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34959 return 0;
34960 }
34961
15a11c5b 34962@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
58c5fc13
MT
34963 /* ignore backtraces if failed to add a sample */
34964 if (state == sb_bt_start) {
34965 state = sb_bt_ignore;
34966- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
34967+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
34968 }
34969 }
34970 release_mm(mm);
fe2de317
MT
34971diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
34972index dd87e86..bc0148c 100644
34973--- a/drivers/oprofile/event_buffer.c
34974+++ b/drivers/oprofile/event_buffer.c
34975@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
ae4e228f
MT
34976 }
34977
58c5fc13
MT
34978 if (buffer_pos == buffer_size) {
34979- atomic_inc(&oprofile_stats.event_lost_overflow);
34980+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
34981 return;
34982 }
34983
fe2de317
MT
34984diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
34985index f8c752e..28bf4fc 100644
34986--- a/drivers/oprofile/oprof.c
34987+++ b/drivers/oprofile/oprof.c
34988@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
ae4e228f
MT
34989 if (oprofile_ops.switch_events())
34990 return;
58c5fc13 34991
ae4e228f
MT
34992- atomic_inc(&oprofile_stats.multiplex_counter);
34993+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
34994 start_switch_worker();
34995 }
58c5fc13 34996
fe2de317
MT
34997diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
34998index 917d28e..d62d981 100644
34999--- a/drivers/oprofile/oprofile_stats.c
35000+++ b/drivers/oprofile/oprofile_stats.c
ae4e228f 35001@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
58c5fc13
MT
35002 cpu_buf->sample_invalid_eip = 0;
35003 }
35004
35005- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35006- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35007- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35008- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
ae4e228f 35009- atomic_set(&oprofile_stats.multiplex_counter, 0);
58c5fc13
MT
35010+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35011+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35012+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35013+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
ae4e228f 35014+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
58c5fc13
MT
35015 }
35016
35017
fe2de317
MT
35018diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35019index 38b6fc0..b5cbfce 100644
35020--- a/drivers/oprofile/oprofile_stats.h
35021+++ b/drivers/oprofile/oprofile_stats.h
ae4e228f 35022@@ -13,11 +13,11 @@
6e9df6a3 35023 #include <linux/atomic.h>
58c5fc13
MT
35024
35025 struct oprofile_stat_struct {
35026- atomic_t sample_lost_no_mm;
35027- atomic_t sample_lost_no_mapping;
35028- atomic_t bt_lost_no_mapping;
35029- atomic_t event_lost_overflow;
ae4e228f 35030- atomic_t multiplex_counter;
58c5fc13
MT
35031+ atomic_unchecked_t sample_lost_no_mm;
35032+ atomic_unchecked_t sample_lost_no_mapping;
35033+ atomic_unchecked_t bt_lost_no_mapping;
35034+ atomic_unchecked_t event_lost_overflow;
ae4e228f 35035+ atomic_unchecked_t multiplex_counter;
58c5fc13
MT
35036 };
35037
35038 extern struct oprofile_stat_struct oprofile_stats;
fe2de317
MT
35039diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35040index e9ff6f7..28e259a 100644
35041--- a/drivers/oprofile/oprofilefs.c
35042+++ b/drivers/oprofile/oprofilefs.c
35043@@ -186,7 +186,7 @@ static const struct file_operations atomic_ro_fops = {
35044
35045
35046 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35047- char const *name, atomic_t *val)
35048+ char const *name, atomic_unchecked_t *val)
35049 {
35050 return __oprofilefs_create_file(sb, root, name,
35051 &atomic_ro_fops, 0444, val);
35052diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35053index 3f56bc0..707d642 100644
35054--- a/drivers/parport/procfs.c
35055+++ b/drivers/parport/procfs.c
35056@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
ae4e228f
MT
35057
35058 *ppos += len;
35059
35060- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
bc901d79 35061+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
ae4e228f
MT
35062 }
35063
35064 #ifdef CONFIG_PARPORT_1284
fe2de317 35065@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
ae4e228f
MT
35066
35067 *ppos += len;
35068
35069- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
bc901d79 35070+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
ae4e228f
MT
35071 }
35072 #endif /* IEEE1284.3 support. */
35073
fe2de317
MT
35074diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35075index 9fff878..ad0ad53 100644
35076--- a/drivers/pci/hotplug/cpci_hotplug.h
35077+++ b/drivers/pci/hotplug/cpci_hotplug.h
15a11c5b
MT
35078@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35079 int (*hardware_test) (struct slot* slot, u32 value);
35080 u8 (*get_power) (struct slot* slot);
35081 int (*set_power) (struct slot* slot, int value);
35082-};
35083+} __no_const;
35084
35085 struct cpci_hp_controller {
35086 unsigned int irq;
fe2de317
MT
35087diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35088index 76ba8a1..20ca857 100644
35089--- a/drivers/pci/hotplug/cpqphp_nvram.c
35090+++ b/drivers/pci/hotplug/cpqphp_nvram.c
35091@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
58c5fc13
MT
35092
35093 void compaq_nvram_init (void __iomem *rom_start)
35094 {
35095+
35096+#ifndef CONFIG_PAX_KERNEXEC
35097 if (rom_start) {
35098 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35099 }
35100+#endif
35101+
35102 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35103
35104 /* initialize our int15 lock */
fe2de317
MT
35105diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35106index cbfbab1..6a9fced 100644
35107--- a/drivers/pci/pcie/aspm.c
35108+++ b/drivers/pci/pcie/aspm.c
16454cff
MT
35109@@ -27,9 +27,9 @@
35110 #define MODULE_PARAM_PREFIX "pcie_aspm."
35111
35112 /* Note: those are not register definitions */
35113-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35114-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35115-#define ASPM_STATE_L1 (4) /* L1 state */
35116+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35117+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35118+#define ASPM_STATE_L1 (4U) /* L1 state */
35119 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35120 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35121
fe2de317
MT
35122diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35123index 6ab6bd3..72bdc69 100644
35124--- a/drivers/pci/probe.c
35125+++ b/drivers/pci/probe.c
35126@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
bc901d79
MT
35127 u32 l, sz, mask;
35128 u16 orig_cmd;
35129
35130- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35131+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35132
35133 if (!dev->mmio_always_on) {
35134 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
fe2de317
MT
35135diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35136index 27911b5..5b6db88 100644
35137--- a/drivers/pci/proc.c
35138+++ b/drivers/pci/proc.c
35139@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
58c5fc13
MT
35140 static int __init pci_proc_init(void)
35141 {
35142 struct pci_dev *dev = NULL;
35143+
35144+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35145+#ifdef CONFIG_GRKERNSEC_PROC_USER
35146+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35147+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35148+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35149+#endif
35150+#else
35151 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35152+#endif
35153 proc_create("devices", 0, proc_bus_pci_dir,
35154 &proc_bus_pci_dev_operations);
35155 proc_initialized = 1;
fe2de317
MT
35156diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
35157index 90832a9..419089a 100644
35158--- a/drivers/pci/xen-pcifront.c
35159+++ b/drivers/pci/xen-pcifront.c
35160@@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
66a7e928
MT
35161 struct pcifront_sd *sd = bus->sysdata;
35162 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35163
35164+ pax_track_stack();
35165+
35166 if (verbose_request)
35167 dev_info(&pdev->xdev->dev,
35168 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
fe2de317 35169@@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
66a7e928
MT
35170 struct pcifront_sd *sd = bus->sysdata;
35171 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35172
35173+ pax_track_stack();
35174+
35175 if (verbose_request)
35176 dev_info(&pdev->xdev->dev,
35177 "write dev=%04x:%02x:%02x.%01x - "
fe2de317 35178@@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
66a7e928
MT
35179 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35180 struct msi_desc *entry;
35181
35182+ pax_track_stack();
35183+
35184 if (nvec > SH_INFO_MAX_VEC) {
35185 dev_err(&dev->dev, "too much vector for pci frontend: %x."
35186 " Increase SH_INFO_MAX_VEC.\n", nvec);
fe2de317 35187@@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
66a7e928
MT
35188 struct pcifront_sd *sd = dev->bus->sysdata;
35189 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35190
35191+ pax_track_stack();
35192+
35193 err = do_pci_op(pdev, &op);
35194
35195 /* What should do for error ? */
fe2de317 35196@@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
66a7e928
MT
35197 struct pcifront_sd *sd = dev->bus->sysdata;
35198 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35199
35200+ pax_track_stack();
35201+
35202 err = do_pci_op(pdev, &op);
35203 if (likely(!err)) {
35204 vector[0] = op.value;
fe2de317
MT
35205diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35206index 7bd829f..a3237ad 100644
35207--- a/drivers/platform/x86/thinkpad_acpi.c
35208+++ b/drivers/platform/x86/thinkpad_acpi.c
15a11c5b 35209@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
66a7e928
MT
35210 return 0;
35211 }
35212
15a11c5b
MT
35213-void static hotkey_mask_warn_incomplete_mask(void)
35214+static void hotkey_mask_warn_incomplete_mask(void)
66a7e928 35215 {
15a11c5b
MT
35216 /* log only what the user can fix... */
35217 const u32 wantedmask = hotkey_driver_mask &
fe2de317
MT
35218@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35219 }
35220 }
35221
35222-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35223- struct tp_nvram_state *newn,
35224- const u32 event_mask)
35225-{
35226-
35227 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35228 do { \
35229 if ((event_mask & (1 << __scancode)) && \
35230@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35231 tpacpi_hotkey_send_key(__scancode); \
35232 } while (0)
35233
35234- void issue_volchange(const unsigned int oldvol,
35235- const unsigned int newvol)
35236- {
35237- unsigned int i = oldvol;
35238+static void issue_volchange(const unsigned int oldvol,
35239+ const unsigned int newvol,
35240+ const u32 event_mask)
35241+{
35242+ unsigned int i = oldvol;
35243
35244- while (i > newvol) {
35245- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35246- i--;
35247- }
35248- while (i < newvol) {
35249- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35250- i++;
35251- }
35252+ while (i > newvol) {
35253+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35254+ i--;
35255 }
35256+ while (i < newvol) {
35257+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35258+ i++;
35259+ }
35260+}
35261
35262- void issue_brightnesschange(const unsigned int oldbrt,
35263- const unsigned int newbrt)
35264- {
35265- unsigned int i = oldbrt;
35266+static void issue_brightnesschange(const unsigned int oldbrt,
35267+ const unsigned int newbrt,
35268+ const u32 event_mask)
35269+{
35270+ unsigned int i = oldbrt;
35271
35272- while (i > newbrt) {
35273- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35274- i--;
35275- }
35276- while (i < newbrt) {
35277- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35278- i++;
35279- }
35280+ while (i > newbrt) {
35281+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35282+ i--;
35283+ }
35284+ while (i < newbrt) {
35285+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35286+ i++;
35287 }
35288+}
35289
35290+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35291+ struct tp_nvram_state *newn,
35292+ const u32 event_mask)
35293+{
35294 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35295 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35296 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35297@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35298 oldn->volume_level != newn->volume_level) {
35299 /* recently muted, or repeated mute keypress, or
35300 * multiple presses ending in mute */
35301- issue_volchange(oldn->volume_level, newn->volume_level);
35302+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35303 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35304 }
35305 } else {
35306@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35307 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35308 }
35309 if (oldn->volume_level != newn->volume_level) {
35310- issue_volchange(oldn->volume_level, newn->volume_level);
35311+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35312 } else if (oldn->volume_toggle != newn->volume_toggle) {
35313 /* repeated vol up/down keypress at end of scale ? */
35314 if (newn->volume_level == 0)
35315@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35316 /* handle brightness */
35317 if (oldn->brightness_level != newn->brightness_level) {
35318 issue_brightnesschange(oldn->brightness_level,
35319- newn->brightness_level);
35320+ newn->brightness_level,
35321+ event_mask);
35322 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35323 /* repeated key presses that didn't change state */
35324 if (newn->brightness_level == 0)
35325@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35326 && !tp_features.bright_unkfw)
35327 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35328 }
35329+}
35330
35331 #undef TPACPI_COMPARE_KEY
35332 #undef TPACPI_MAY_SEND_KEY
35333-}
35334
35335 /*
35336 * Polling driver
35337diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35338index b859d16..5cc6b1a 100644
35339--- a/drivers/pnp/pnpbios/bioscalls.c
35340+++ b/drivers/pnp/pnpbios/bioscalls.c
df50ba0c 35341@@ -59,7 +59,7 @@ do { \
ae4e228f 35342 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
58c5fc13
MT
35343 } while(0)
35344
ae4e228f
MT
35345-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35346+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35347 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
58c5fc13
MT
35348
35349 /*
fe2de317 35350@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
58c5fc13
MT
35351
35352 cpu = get_cpu();
35353 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35354+
ae4e228f 35355+ pax_open_kernel();
58c5fc13 35356 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
ae4e228f 35357+ pax_close_kernel();
58c5fc13 35358
58c5fc13
MT
35359 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35360 spin_lock_irqsave(&pnp_bios_lock, flags);
fe2de317 35361@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
58c5fc13
MT
35362 :"memory");
35363 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35364
ae4e228f 35365+ pax_open_kernel();
58c5fc13 35366 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
ae4e228f 35367+ pax_close_kernel();
58c5fc13
MT
35368+
35369 put_cpu();
35370
35371 /* If we get here and this is set then the PnP BIOS faulted on us. */
fe2de317 35372@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
58c5fc13
MT
35373 return status;
35374 }
35375
35376-void pnpbios_calls_init(union pnp_bios_install_struct *header)
35377+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35378 {
35379 int i;
35380
fe2de317 35381@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
58c5fc13
MT
35382 pnp_bios_callpoint.offset = header->fields.pm16offset;
35383 pnp_bios_callpoint.segment = PNP_CS16;
35384
ae4e228f 35385+ pax_open_kernel();
58c5fc13 35386+
ae4e228f
MT
35387 for_each_possible_cpu(i) {
35388 struct desc_struct *gdt = get_cpu_gdt_table(i);
35389 if (!gdt)
fe2de317 35390@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
ae4e228f
MT
35391 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35392 (unsigned long)__va(header->fields.pm16dseg));
58c5fc13
MT
35393 }
35394+
ae4e228f 35395+ pax_close_kernel();
58c5fc13 35396 }
fe2de317
MT
35397diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35398index b0ecacb..7c9da2e 100644
35399--- a/drivers/pnp/resource.c
35400+++ b/drivers/pnp/resource.c
35401@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
58c5fc13
MT
35402 return 1;
35403
35404 /* check if the resource is valid */
35405- if (*irq < 0 || *irq > 15)
35406+ if (*irq > 15)
35407 return 0;
35408
35409 /* check if the resource is reserved */
fe2de317 35410@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
58c5fc13
MT
35411 return 1;
35412
35413 /* check if the resource is valid */
35414- if (*dma < 0 || *dma == 4 || *dma > 7)
35415+ if (*dma == 4 || *dma > 7)
35416 return 0;
35417
35418 /* check if the resource is reserved */
fe2de317
MT
35419diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35420index bb16f5b..c751eef 100644
35421--- a/drivers/power/bq27x00_battery.c
35422+++ b/drivers/power/bq27x00_battery.c
15a11c5b
MT
35423@@ -67,7 +67,7 @@
35424 struct bq27x00_device_info;
35425 struct bq27x00_access_methods {
35426 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35427-};
35428+} __no_const;
35429
35430 enum bq27x00_chip { BQ27000, BQ27500 };
35431
fe2de317
MT
35432diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35433index 33f5d9a..d957d3f 100644
35434--- a/drivers/regulator/max8660.c
35435+++ b/drivers/regulator/max8660.c
35436@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
15a11c5b
MT
35437 max8660->shadow_regs[MAX8660_OVER1] = 5;
35438 } else {
35439 /* Otherwise devices can be toggled via software */
35440- max8660_dcdc_ops.enable = max8660_dcdc_enable;
35441- max8660_dcdc_ops.disable = max8660_dcdc_disable;
35442+ pax_open_kernel();
35443+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35444+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35445+ pax_close_kernel();
35446 }
66a7e928 35447
15a11c5b 35448 /*
fe2de317
MT
35449diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35450index 3285d41..ab7c22a 100644
35451--- a/drivers/regulator/mc13892-regulator.c
35452+++ b/drivers/regulator/mc13892-regulator.c
35453@@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
15a11c5b
MT
35454 }
35455 mc13xxx_unlock(mc13892);
66a7e928 35456
15a11c5b
MT
35457- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35458+ pax_open_kernel();
35459+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35460 = mc13892_vcam_set_mode;
35461- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35462+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35463 = mc13892_vcam_get_mode;
35464+ pax_close_kernel();
35465 for (i = 0; i < pdata->num_regulators; i++) {
35466 init_data = &pdata->regulators[i];
35467 priv->regulators[i] = regulator_register(
fe2de317
MT
35468diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35469index cace6d3..f623fda 100644
35470--- a/drivers/rtc/rtc-dev.c
35471+++ b/drivers/rtc/rtc-dev.c
bc901d79
MT
35472@@ -14,6 +14,7 @@
35473 #include <linux/module.h>
35474 #include <linux/rtc.h>
35475 #include <linux/sched.h>
35476+#include <linux/grsecurity.h>
35477 #include "rtc-core.h"
35478
35479 static dev_t rtc_devt;
fe2de317 35480@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
bc901d79
MT
35481 if (copy_from_user(&tm, uarg, sizeof(tm)))
35482 return -EFAULT;
35483
35484+ gr_log_timechange();
35485+
35486 return rtc_set_time(rtc, &tm);
35487
35488 case RTC_PIE_ON:
fe2de317
MT
35489diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
35490index f66c33b..7ae5823 100644
35491--- a/drivers/scsi/BusLogic.c
35492+++ b/drivers/scsi/BusLogic.c
35493@@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
35494 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
35495 *PrototypeHostAdapter)
35496 {
35497+ pax_track_stack();
35498+
35499 /*
35500 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
35501 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
35502diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35503index ffb5878..e6d785c 100644
35504--- a/drivers/scsi/aacraid/aacraid.h
35505+++ b/drivers/scsi/aacraid/aacraid.h
15a11c5b
MT
35506@@ -492,7 +492,7 @@ struct adapter_ops
35507 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35508 /* Administrative operations */
35509 int (*adapter_comm)(struct aac_dev * dev, int comm);
35510-};
35511+} __no_const;
66a7e928
MT
35512
35513 /*
15a11c5b 35514 * Define which interrupt handler needs to be installed
fe2de317
MT
35515diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
35516index 8a0b330..b4286de 100644
35517--- a/drivers/scsi/aacraid/commctrl.c
35518+++ b/drivers/scsi/aacraid/commctrl.c
35519@@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
66a7e928
MT
35520 u32 actual_fibsize64, actual_fibsize = 0;
35521 int i;
35522
35523+ pax_track_stack();
35524
35525 if (dev->in_reset) {
35526 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
fe2de317
MT
35527diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35528index c7b6fed..4db0569 100644
35529--- a/drivers/scsi/aacraid/linit.c
35530+++ b/drivers/scsi/aacraid/linit.c
35531@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
6e9df6a3
MT
35532 #elif defined(__devinitconst)
35533 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35534 #else
35535-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35536+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35537 #endif
35538 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35539 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
fe2de317
MT
35540diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35541index d5ff142..49c0ebb 100644
35542--- a/drivers/scsi/aic94xx/aic94xx_init.c
35543+++ b/drivers/scsi/aic94xx/aic94xx_init.c
35544@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
6e9df6a3
MT
35545 .lldd_control_phy = asd_control_phy,
35546 };
35547
35548-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35549+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35550 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35551 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35552 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
fe2de317
MT
35553diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35554index a796de9..1ef20e1 100644
35555--- a/drivers/scsi/bfa/bfa.h
35556+++ b/drivers/scsi/bfa/bfa.h
35557@@ -196,7 +196,7 @@ struct bfa_hwif_s {
35558 u32 *end);
35559 int cpe_vec_q0;
35560 int rme_vec_q0;
35561-};
35562+} __no_const;
35563 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
66a7e928 35564
fe2de317
MT
35565 struct bfa_faa_cbfn_s {
35566diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35567index e07bd47..dbd260a 100644
35568--- a/drivers/scsi/bfa/bfa_fcpim.c
35569+++ b/drivers/scsi/bfa/bfa_fcpim.c
35570@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
6e9df6a3
MT
35571 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35572 {
35573 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35574- struct bfa_itn_s *itn;
35575+ bfa_itn_s_no_const *itn;
35576
35577 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35578 itn->isr = isr;
fe2de317
MT
35579diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35580index 1080bcb..a3b39e3 100644
35581--- a/drivers/scsi/bfa/bfa_fcpim.h
35582+++ b/drivers/scsi/bfa/bfa_fcpim.h
6e9df6a3
MT
35583@@ -37,6 +37,7 @@ struct bfa_iotag_s {
35584 struct bfa_itn_s {
35585 bfa_isr_func_t isr;
35586 };
35587+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35588
35589 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35590 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35591@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
35592 struct list_head iotag_tio_free_q; /* free IO resources */
35593 struct list_head iotag_unused_q; /* unused IO resources*/
35594 struct bfa_iotag_s *iotag_arr;
35595- struct bfa_itn_s *itn_arr;
35596+ bfa_itn_s_no_const *itn_arr;
35597 int num_ioim_reqs;
35598 int num_fwtio_reqs;
35599 int num_itns;
fe2de317
MT
35600diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
35601index d4f951f..197c350 100644
35602--- a/drivers/scsi/bfa/bfa_fcs_lport.c
35603+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
35604@@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
66a7e928
MT
35605 u16 len, count;
35606 u16 templen;
8308f9c9 35607
66a7e928
MT
35608+ pax_track_stack();
35609+
35610 /*
35611 * get hba attributes
35612 */
fe2de317 35613@@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
66a7e928
MT
35614 u8 count = 0;
35615 u16 templen;
8308f9c9 35616
66a7e928
MT
35617+ pax_track_stack();
35618+
35619 /*
35620 * get port attributes
8308f9c9 35621 */
fe2de317
MT
35622diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
35623index 52628d5..f89d033 100644
35624--- a/drivers/scsi/bfa/bfa_fcs_rport.c
35625+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
35626@@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
66a7e928
MT
35627 struct fc_rpsc_speed_info_s speeds;
35628 struct bfa_port_attr_s pport_attr;
35629
35630+ pax_track_stack();
35631+
35632 bfa_trc(port->fcs, rx_fchs->s_id);
35633 bfa_trc(port->fcs, rx_fchs->d_id);
35634
fe2de317
MT
35635diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35636index 546d46b..642fa5b 100644
35637--- a/drivers/scsi/bfa/bfa_ioc.h
35638+++ b/drivers/scsi/bfa/bfa_ioc.h
6e9df6a3 35639@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
15a11c5b
MT
35640 bfa_ioc_disable_cbfn_t disable_cbfn;
35641 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35642 bfa_ioc_reset_cbfn_t reset_cbfn;
35643-};
35644+} __no_const;
8308f9c9 35645
15a11c5b 35646 /*
6e9df6a3
MT
35647 * IOC event notification mechanism.
35648@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
15a11c5b
MT
35649 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35650 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
6e9df6a3 35651 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
15a11c5b
MT
35652-};
35653+} __no_const;
35654
6e9df6a3
MT
35655 /*
35656 * Queue element to wait for room in request queue. FIFO order is
fe2de317
MT
35657diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
35658index 66fb725..0fe05ab 100644
35659--- a/drivers/scsi/bfa/bfad.c
35660+++ b/drivers/scsi/bfa/bfad.c
35661@@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
35662 struct bfad_vport_s *vport, *vport_new;
35663 struct bfa_fcs_driver_info_s driver_info;
35664
66a7e928
MT
35665+ pax_track_stack();
35666+
fe2de317
MT
35667 /* Limit min/max. xfer size to [64k-32MB] */
35668 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
35669 max_xfer_size = BFAD_MIN_SECTORS >> 1;
35670diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
35671index b4f6c9a..0eb1938 100644
35672--- a/drivers/scsi/dpt_i2o.c
35673+++ b/drivers/scsi/dpt_i2o.c
35674@@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
66a7e928
MT
35675 dma_addr_t addr;
35676 ulong flags = 0;
bc901d79 35677
66a7e928 35678+ pax_track_stack();
bc901d79 35679+
66a7e928
MT
35680 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
35681 // get user msg size in u32s
35682 if(get_user(size, &user_msg[0])){
fe2de317 35683@@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
66a7e928
MT
35684 s32 rcode;
35685 dma_addr_t addr;
35686
35687+ pax_track_stack();
35688+
35689 memset(msg, 0 , sizeof(msg));
35690 len = scsi_bufflen(cmd);
35691 direction = 0x00000000;
fe2de317
MT
35692diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
35693index 94de889..ca4f0cf 100644
35694--- a/drivers/scsi/eata.c
35695+++ b/drivers/scsi/eata.c
35696@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
66a7e928
MT
35697 struct hostdata *ha;
35698 char name[16];
35699
35700+ pax_track_stack();
35701+
35702 sprintf(name, "%s%d", driver_name, j);
35703
35704 if (!request_region(port_base, REGION_SIZE, driver_name)) {
fe2de317
MT
35705diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
35706index c74c4b8..c41ca3f 100644
35707--- a/drivers/scsi/fcoe/fcoe_ctlr.c
35708+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
35709@@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
66a7e928
MT
35710 } buf;
35711 int rc;
35712
35713+ pax_track_stack();
35714+
35715 fiph = (struct fip_header *)skb->data;
35716 sub = fiph->fip_subcode;
35717
fe2de317
MT
35718diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
35719index 3242bca..45a83e7 100644
35720--- a/drivers/scsi/gdth.c
35721+++ b/drivers/scsi/gdth.c
66a7e928
MT
35722@@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
35723 unsigned long flags;
35724 gdth_ha_str *ha;
35725
35726+ pax_track_stack();
35727+
35728 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
35729 return -EFAULT;
35730 ha = gdth_find_ha(ldrv.ionode);
fe2de317 35731@@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
66a7e928
MT
35732 gdth_ha_str *ha;
35733 int rval;
35734
35735+ pax_track_stack();
35736+
35737 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
35738 res.number >= MAX_HDRIVES)
35739 return -EFAULT;
fe2de317 35740@@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg, char *cmnd)
66a7e928
MT
35741 gdth_ha_str *ha;
35742 int rval;
35743
35744+ pax_track_stack();
35745+
35746 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
35747 return -EFAULT;
35748 ha = gdth_find_ha(gen.ionode);
35749@@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
35750 int i;
35751 gdth_cmd_str gdtcmd;
35752 char cmnd[MAX_COMMAND_SIZE];
35753+
35754+ pax_track_stack();
35755+
35756 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
35757
35758 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
fe2de317
MT
35759diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
35760index 6527543..81e4fe2 100644
35761--- a/drivers/scsi/gdth_proc.c
35762+++ b/drivers/scsi/gdth_proc.c
35763@@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
66a7e928
MT
35764 u64 paddr;
35765
35766 char cmnd[MAX_COMMAND_SIZE];
35767+
35768+ pax_track_stack();
35769+
35770 memset(cmnd, 0xff, 12);
35771 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
35772
fe2de317 35773@@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
66a7e928
MT
35774 gdth_hget_str *phg;
35775 char cmnd[MAX_COMMAND_SIZE];
35776
35777+ pax_track_stack();
35778+
35779 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
35780 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
35781 if (!gdtcmd || !estr)
fe2de317
MT
35782diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
35783index 351dc0b..951dc32 100644
35784--- a/drivers/scsi/hosts.c
35785+++ b/drivers/scsi/hosts.c
66a7e928
MT
35786@@ -42,7 +42,7 @@
35787 #include "scsi_logging.h"
35788
35789
35790-static atomic_t scsi_host_next_hn; /* host_no for next new host */
35791+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
35792
35793
35794 static void scsi_host_cls_release(struct device *dev)
fe2de317 35795@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
66a7e928
MT
35796 * subtract one because we increment first then return, but we need to
35797 * know what the next host number was before increment
35798 */
35799- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
35800+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
35801 shost->dma_channel = 0xff;
35802
35803 /* These three are default values which can be overridden */
fe2de317
MT
35804diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
35805index 418ce83..7ee1225 100644
35806--- a/drivers/scsi/hpsa.c
35807+++ b/drivers/scsi/hpsa.c
35808@@ -499,7 +499,7 @@ static inline u32 next_command(struct ctlr_info *h)
15a11c5b
MT
35809 u32 a;
35810
35811 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35812- return h->access.command_completed(h);
35813+ return h->access->command_completed(h);
35814
35815 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35816 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
fe2de317 35817@@ -2956,7 +2956,7 @@ static void start_io(struct ctlr_info *h)
15a11c5b
MT
35818 while (!list_empty(&h->reqQ)) {
35819 c = list_entry(h->reqQ.next, struct CommandList, list);
35820 /* can't do anything if fifo is full */
35821- if ((h->access.fifo_full(h))) {
35822+ if ((h->access->fifo_full(h))) {
35823 dev_warn(&h->pdev->dev, "fifo full\n");
35824 break;
35825 }
fe2de317 35826@@ -2966,7 +2966,7 @@ static void start_io(struct ctlr_info *h)
15a11c5b 35827 h->Qdepth--;
66a7e928 35828
15a11c5b
MT
35829 /* Tell the controller execute command */
35830- h->access.submit_command(h, c);
35831+ h->access->submit_command(h, c);
66a7e928 35832
15a11c5b
MT
35833 /* Put job onto the completed Q */
35834 addQ(&h->cmpQ, c);
fe2de317 35835@@ -2975,17 +2975,17 @@ static void start_io(struct ctlr_info *h)
66a7e928 35836
15a11c5b
MT
35837 static inline unsigned long get_next_completion(struct ctlr_info *h)
35838 {
35839- return h->access.command_completed(h);
35840+ return h->access->command_completed(h);
35841 }
66a7e928 35842
15a11c5b
MT
35843 static inline bool interrupt_pending(struct ctlr_info *h)
35844 {
35845- return h->access.intr_pending(h);
35846+ return h->access->intr_pending(h);
35847 }
66a7e928 35848
15a11c5b
MT
35849 static inline long interrupt_not_for_us(struct ctlr_info *h)
35850 {
35851- return (h->access.intr_pending(h) == 0) ||
35852+ return (h->access->intr_pending(h) == 0) ||
35853 (h->interrupts_enabled == 0);
35854 }
66a7e928 35855
fe2de317 35856@@ -3882,7 +3882,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
15a11c5b
MT
35857 if (prod_index < 0)
35858 return -ENODEV;
35859 h->product_name = products[prod_index].product_name;
35860- h->access = *(products[prod_index].access);
35861+ h->access = products[prod_index].access;
66a7e928 35862
15a11c5b
MT
35863 if (hpsa_board_disabled(h->pdev)) {
35864 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
883a9837 35865@@ -4163,7 +4163,7 @@ reinit_after_soft_reset:
66a7e928 35866 }
66a7e928 35867
15a11c5b
MT
35868 /* make sure the board interrupts are off */
35869- h->access.set_intr_mask(h, HPSA_INTR_OFF);
35870+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
66a7e928 35871
15a11c5b
MT
35872 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
35873 goto clean2;
883a9837 35874@@ -4197,7 +4197,7 @@ reinit_after_soft_reset:
15a11c5b
MT
35875 * fake ones to scoop up any residual completions.
35876 */
35877 spin_lock_irqsave(&h->lock, flags);
35878- h->access.set_intr_mask(h, HPSA_INTR_OFF);
35879+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
35880 spin_unlock_irqrestore(&h->lock, flags);
35881 free_irq(h->intr[h->intr_mode], h);
35882 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
883a9837 35883@@ -4216,9 +4216,9 @@ reinit_after_soft_reset:
15a11c5b
MT
35884 dev_info(&h->pdev->dev, "Board READY.\n");
35885 dev_info(&h->pdev->dev,
35886 "Waiting for stale completions to drain.\n");
35887- h->access.set_intr_mask(h, HPSA_INTR_ON);
35888+ h->access->set_intr_mask(h, HPSA_INTR_ON);
35889 msleep(10000);
35890- h->access.set_intr_mask(h, HPSA_INTR_OFF);
35891+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
35892
35893 rc = controller_reset_failed(h->cfgtable);
35894 if (rc)
883a9837 35895@@ -4239,7 +4239,7 @@ reinit_after_soft_reset:
15a11c5b
MT
35896 }
35897
35898 /* Turn the interrupts on so we can service requests */
35899- h->access.set_intr_mask(h, HPSA_INTR_ON);
35900+ h->access->set_intr_mask(h, HPSA_INTR_ON);
35901
35902 hpsa_hba_inquiry(h);
35903 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
fe2de317 35904@@ -4292,7 +4292,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
15a11c5b
MT
35905 * To write all data in the battery backed cache to disks
35906 */
35907 hpsa_flush_cache(h);
35908- h->access.set_intr_mask(h, HPSA_INTR_OFF);
35909+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
35910 free_irq(h->intr[h->intr_mode], h);
35911 #ifdef CONFIG_PCI_MSI
35912 if (h->msix_vector)
fe2de317 35913@@ -4455,7 +4455,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
15a11c5b
MT
35914 return;
35915 }
35916 /* Change the access methods to the performant access methods */
35917- h->access = SA5_performant_access;
35918+ h->access = &SA5_performant_access;
35919 h->transMethod = CFGTBL_Trans_Performant;
35920 }
35921
fe2de317
MT
35922diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
35923index 7f53cea..a8c7188 100644
35924--- a/drivers/scsi/hpsa.h
35925+++ b/drivers/scsi/hpsa.h
15a11c5b
MT
35926@@ -73,7 +73,7 @@ struct ctlr_info {
35927 unsigned int msix_vector;
35928 unsigned int msi_vector;
35929 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
35930- struct access_method access;
35931+ struct access_method *access;
35932
35933 /* queue and queue Info */
35934 struct list_head reqQ;
fe2de317
MT
35935diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
35936index f2df059..a3a9930 100644
35937--- a/drivers/scsi/ips.h
35938+++ b/drivers/scsi/ips.h
15a11c5b
MT
35939@@ -1027,7 +1027,7 @@ typedef struct {
35940 int (*intr)(struct ips_ha *);
35941 void (*enableint)(struct ips_ha *);
35942 uint32_t (*statupd)(struct ips_ha *);
35943-} ips_hw_func_t;
35944+} __no_const ips_hw_func_t;
35945
35946 typedef struct ips_ha {
35947 uint8_t ha_id[IPS_MAX_CHANNELS+1];
fe2de317
MT
35948diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
35949index d261e98..1e00f35 100644
35950--- a/drivers/scsi/libfc/fc_exch.c
35951+++ b/drivers/scsi/libfc/fc_exch.c
16454cff 35952@@ -105,12 +105,12 @@ struct fc_exch_mgr {
58c5fc13
MT
35953 * all together if not used XXX
35954 */
35955 struct {
35956- atomic_t no_free_exch;
35957- atomic_t no_free_exch_xid;
35958- atomic_t xid_not_found;
35959- atomic_t xid_busy;
35960- atomic_t seq_not_found;
35961- atomic_t non_bls_resp;
35962+ atomic_unchecked_t no_free_exch;
35963+ atomic_unchecked_t no_free_exch_xid;
35964+ atomic_unchecked_t xid_not_found;
35965+ atomic_unchecked_t xid_busy;
35966+ atomic_unchecked_t seq_not_found;
35967+ atomic_unchecked_t non_bls_resp;
35968 } stats;
58c5fc13 35969 };
16454cff 35970
fe2de317 35971@@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
58c5fc13
MT
35972 /* allocate memory for exchange */
35973 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
35974 if (!ep) {
35975- atomic_inc(&mp->stats.no_free_exch);
35976+ atomic_inc_unchecked(&mp->stats.no_free_exch);
35977 goto out;
35978 }
35979 memset(ep, 0, sizeof(*ep));
6e9df6a3 35980@@ -779,7 +779,7 @@ out:
58c5fc13
MT
35981 return ep;
35982 err:
ae4e228f 35983 spin_unlock_bh(&pool->lock);
58c5fc13
MT
35984- atomic_inc(&mp->stats.no_free_exch_xid);
35985+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
35986 mempool_free(ep, mp->ep_pool);
35987 return NULL;
35988 }
fe2de317 35989@@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
58c5fc13
MT
35990 xid = ntohs(fh->fh_ox_id); /* we originated exch */
35991 ep = fc_exch_find(mp, xid);
35992 if (!ep) {
35993- atomic_inc(&mp->stats.xid_not_found);
35994+ atomic_inc_unchecked(&mp->stats.xid_not_found);
35995 reject = FC_RJT_OX_ID;
35996 goto out;
35997 }
fe2de317 35998@@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
58c5fc13
MT
35999 ep = fc_exch_find(mp, xid);
36000 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36001 if (ep) {
36002- atomic_inc(&mp->stats.xid_busy);
36003+ atomic_inc_unchecked(&mp->stats.xid_busy);
36004 reject = FC_RJT_RX_ID;
36005 goto rel;
36006 }
fe2de317 36007@@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
58c5fc13
MT
36008 }
36009 xid = ep->xid; /* get our XID */
36010 } else if (!ep) {
36011- atomic_inc(&mp->stats.xid_not_found);
36012+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36013 reject = FC_RJT_RX_ID; /* XID not found */
36014 goto out;
36015 }
fe2de317 36016@@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
58c5fc13
MT
36017 } else {
36018 sp = &ep->seq;
36019 if (sp->id != fh->fh_seq_id) {
36020- atomic_inc(&mp->stats.seq_not_found);
36021+ atomic_inc_unchecked(&mp->stats.seq_not_found);
6e9df6a3
MT
36022 if (f_ctl & FC_FC_END_SEQ) {
36023 /*
36024 * Update sequence_id based on incoming last
fe2de317 36025@@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
58c5fc13
MT
36026
36027 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36028 if (!ep) {
36029- atomic_inc(&mp->stats.xid_not_found);
36030+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36031 goto out;
36032 }
36033 if (ep->esb_stat & ESB_ST_COMPLETE) {
36034- atomic_inc(&mp->stats.xid_not_found);
36035+ atomic_inc_unchecked(&mp->stats.xid_not_found);
16454cff 36036 goto rel;
58c5fc13
MT
36037 }
36038 if (ep->rxid == FC_XID_UNKNOWN)
36039 ep->rxid = ntohs(fh->fh_rx_id);
36040 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36041- atomic_inc(&mp->stats.xid_not_found);
36042+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36043 goto rel;
36044 }
36045 if (ep->did != ntoh24(fh->fh_s_id) &&
36046 ep->did != FC_FID_FLOGI) {
36047- atomic_inc(&mp->stats.xid_not_found);
36048+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36049 goto rel;
36050 }
36051 sof = fr_sof(fp);
fe2de317 36052@@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57199397
MT
36053 sp->ssb_stat |= SSB_ST_RESP;
36054 sp->id = fh->fh_seq_id;
36055 } else if (sp->id != fh->fh_seq_id) {
36056- atomic_inc(&mp->stats.seq_not_found);
36057+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36058 goto rel;
58c5fc13 36059 }
57199397 36060
fe2de317 36061@@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
58c5fc13 36062 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
ae4e228f
MT
36063
36064 if (!sp)
58c5fc13
MT
36065- atomic_inc(&mp->stats.xid_not_found);
36066+ atomic_inc_unchecked(&mp->stats.xid_not_found);
ae4e228f 36067 else
58c5fc13
MT
36068- atomic_inc(&mp->stats.non_bls_resp);
36069+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
ae4e228f 36070
58c5fc13 36071 fc_frame_free(fp);
ae4e228f 36072 }
fe2de317
MT
36073diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36074index db9238f..4378ed2 100644
36075--- a/drivers/scsi/libsas/sas_ata.c
36076+++ b/drivers/scsi/libsas/sas_ata.c
36077@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
66a7e928
MT
36078 .postreset = ata_std_postreset,
36079 .error_handler = ata_std_error_handler,
ae4e228f 36080 .post_internal_cmd = sas_ata_post_internal,
66a7e928
MT
36081- .qc_defer = ata_std_qc_defer,
36082+ .qc_defer = ata_std_qc_defer,
36083 .qc_prep = ata_noop_qc_prep,
36084 .qc_issue = sas_ata_qc_issue,
36085 .qc_fill_rtf = sas_ata_qc_fill_rtf,
fe2de317
MT
36086diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36087index c088a36..01c73b0 100644
36088--- a/drivers/scsi/lpfc/lpfc.h
36089+++ b/drivers/scsi/lpfc/lpfc.h
36090@@ -425,7 +425,7 @@ struct lpfc_vport {
36091 struct dentry *debug_nodelist;
36092 struct dentry *vport_debugfs_root;
36093 struct lpfc_debugfs_trc *disc_trc;
36094- atomic_t disc_trc_cnt;
36095+ atomic_unchecked_t disc_trc_cnt;
36096 #endif
36097 uint8_t stat_data_enabled;
36098 uint8_t stat_data_blocked;
36099@@ -835,8 +835,8 @@ struct lpfc_hba {
36100 struct timer_list fabric_block_timer;
36101 unsigned long bit_flags;
36102 #define FABRIC_COMANDS_BLOCKED 0
36103- atomic_t num_rsrc_err;
36104- atomic_t num_cmd_success;
36105+ atomic_unchecked_t num_rsrc_err;
36106+ atomic_unchecked_t num_cmd_success;
36107 unsigned long last_rsrc_error_time;
36108 unsigned long last_ramp_down_time;
36109 unsigned long last_ramp_up_time;
36110@@ -850,7 +850,7 @@ struct lpfc_hba {
36111 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36112 struct dentry *debug_slow_ring_trc;
36113 struct lpfc_debugfs_trc *slow_ring_trc;
36114- atomic_t slow_ring_trc_cnt;
36115+ atomic_unchecked_t slow_ring_trc_cnt;
36116 /* iDiag debugfs sub-directory */
36117 struct dentry *idiag_root;
36118 struct dentry *idiag_pci_cfg;
36119diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36120index a0424dd..2499b6b 100644
36121--- a/drivers/scsi/lpfc/lpfc_debugfs.c
36122+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36123@@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
66a7e928
MT
36124
36125 #include <linux/debugfs.h>
8308f9c9
MT
36126
36127-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36128+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36129 static unsigned long lpfc_debugfs_start_time = 0L;
36130
66a7e928 36131 /* iDiag */
fe2de317 36132@@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
8308f9c9
MT
36133 lpfc_debugfs_enable = 0;
36134
36135 len = 0;
36136- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36137+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36138 (lpfc_debugfs_max_disc_trc - 1);
36139 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36140 dtp = vport->disc_trc + i;
fe2de317 36141@@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
8308f9c9
MT
36142 lpfc_debugfs_enable = 0;
36143
36144 len = 0;
36145- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36146+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36147 (lpfc_debugfs_max_slow_ring_trc - 1);
36148 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36149 dtp = phba->slow_ring_trc + i;
fe2de317 36150@@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
8308f9c9
MT
36151 !vport || !vport->disc_trc)
36152 return;
36153
36154- index = atomic_inc_return(&vport->disc_trc_cnt) &
36155+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36156 (lpfc_debugfs_max_disc_trc - 1);
36157 dtp = vport->disc_trc + index;
36158 dtp->fmt = fmt;
36159 dtp->data1 = data1;
36160 dtp->data2 = data2;
36161 dtp->data3 = data3;
36162- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36163+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36164 dtp->jif = jiffies;
36165 #endif
36166 return;
fe2de317 36167@@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
8308f9c9
MT
36168 !phba || !phba->slow_ring_trc)
36169 return;
36170
36171- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36172+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36173 (lpfc_debugfs_max_slow_ring_trc - 1);
36174 dtp = phba->slow_ring_trc + index;
36175 dtp->fmt = fmt;
36176 dtp->data1 = data1;
36177 dtp->data2 = data2;
36178 dtp->data3 = data3;
36179- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36180+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36181 dtp->jif = jiffies;
36182 #endif
36183 return;
fe2de317 36184@@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
8308f9c9
MT
36185 "slow_ring buffer\n");
36186 goto debug_failed;
36187 }
36188- atomic_set(&phba->slow_ring_trc_cnt, 0);
36189+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36190 memset(phba->slow_ring_trc, 0,
36191 (sizeof(struct lpfc_debugfs_trc) *
36192 lpfc_debugfs_max_slow_ring_trc));
fe2de317 36193@@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
8308f9c9
MT
36194 "buffer\n");
36195 goto debug_failed;
36196 }
36197- atomic_set(&vport->disc_trc_cnt, 0);
36198+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36199
36200 snprintf(name, sizeof(name), "discovery_trace");
36201 vport->debug_disc_trc =
fe2de317
MT
36202diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36203index a3c8200..31e562e 100644
36204--- a/drivers/scsi/lpfc/lpfc_init.c
36205+++ b/drivers/scsi/lpfc/lpfc_init.c
6e9df6a3 36206@@ -9969,8 +9969,10 @@ lpfc_init(void)
15a11c5b
MT
36207 printk(LPFC_COPYRIGHT "\n");
36208
36209 if (lpfc_enable_npiv) {
36210- lpfc_transport_functions.vport_create = lpfc_vport_create;
36211- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36212+ pax_open_kernel();
36213+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36214+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36215+ pax_close_kernel();
36216 }
36217 lpfc_transport_template =
36218 fc_attach_transport(&lpfc_transport_functions);
fe2de317
MT
36219diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36220index eadd241..26c8e0f 100644
36221--- a/drivers/scsi/lpfc/lpfc_scsi.c
36222+++ b/drivers/scsi/lpfc/lpfc_scsi.c
36223@@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
8308f9c9
MT
36224 uint32_t evt_posted;
36225
36226 spin_lock_irqsave(&phba->hbalock, flags);
36227- atomic_inc(&phba->num_rsrc_err);
36228+ atomic_inc_unchecked(&phba->num_rsrc_err);
36229 phba->last_rsrc_error_time = jiffies;
36230
36231 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
fe2de317 36232@@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
8308f9c9
MT
36233 unsigned long flags;
36234 struct lpfc_hba *phba = vport->phba;
36235 uint32_t evt_posted;
36236- atomic_inc(&phba->num_cmd_success);
36237+ atomic_inc_unchecked(&phba->num_cmd_success);
36238
36239 if (vport->cfg_lun_queue_depth <= queue_depth)
36240 return;
fe2de317 36241@@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
8308f9c9
MT
36242 unsigned long num_rsrc_err, num_cmd_success;
36243 int i;
36244
36245- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36246- num_cmd_success = atomic_read(&phba->num_cmd_success);
36247+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36248+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36249
36250 vports = lpfc_create_vport_work_array(phba);
36251 if (vports != NULL)
fe2de317 36252@@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
8308f9c9
MT
36253 }
36254 }
36255 lpfc_destroy_vport_work_array(phba, vports);
36256- atomic_set(&phba->num_rsrc_err, 0);
36257- atomic_set(&phba->num_cmd_success, 0);
36258+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36259+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36260 }
36261
36262 /**
fe2de317 36263@@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
8308f9c9
MT
36264 }
36265 }
36266 lpfc_destroy_vport_work_array(phba, vports);
36267- atomic_set(&phba->num_rsrc_err, 0);
36268- atomic_set(&phba->num_cmd_success, 0);
36269+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36270+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36271 }
36272
36273 /**
fe2de317
MT
36274diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
36275index 2e6619e..fa64494 100644
36276--- a/drivers/scsi/megaraid/megaraid_mbox.c
36277+++ b/drivers/scsi/megaraid/megaraid_mbox.c
36278@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
66a7e928
MT
36279 int rval;
36280 int i;
36281
36282+ pax_track_stack();
36283+
36284 // Allocate memory for the base list of scb for management module.
36285 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36286
fe2de317
MT
36287diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
36288index 86afb13f..c912398 100644
36289--- a/drivers/scsi/osd/osd_initiator.c
36290+++ b/drivers/scsi/osd/osd_initiator.c
36291@@ -97,6 +97,8 @@ static int _osd_get_print_system_info(struct osd_dev *od,
66a7e928
MT
36292 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36293 int ret;
36294
36295+ pax_track_stack();
36296+
36297 or = osd_start_request(od, GFP_KERNEL);
36298 if (!or)
36299 return -ENOMEM;
fe2de317
MT
36300diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36301index d079f9a..d26072c 100644
36302--- a/drivers/scsi/pmcraid.c
36303+++ b/drivers/scsi/pmcraid.c
36304@@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
8308f9c9
MT
36305 res->scsi_dev = scsi_dev;
36306 scsi_dev->hostdata = res;
36307 res->change_detected = 0;
36308- atomic_set(&res->read_failures, 0);
36309- atomic_set(&res->write_failures, 0);
36310+ atomic_set_unchecked(&res->read_failures, 0);
36311+ atomic_set_unchecked(&res->write_failures, 0);
36312 rc = 0;
36313 }
36314 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
fe2de317 36315@@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
8308f9c9
MT
36316
36317 /* If this was a SCSI read/write command keep count of errors */
36318 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36319- atomic_inc(&res->read_failures);
36320+ atomic_inc_unchecked(&res->read_failures);
36321 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36322- atomic_inc(&res->write_failures);
36323+ atomic_inc_unchecked(&res->write_failures);
36324
36325 if (!RES_IS_GSCSI(res->cfg_entry) &&
36326 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36327@@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
36328 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36329 * hrrq_id assigned here in queuecommand
36330 */
36331- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36332+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36333 pinstance->num_hrrq;
36334 cmd->cmd_done = pmcraid_io_done;
36335
66a7e928 36336@@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
8308f9c9
MT
36337 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36338 * hrrq_id assigned here in queuecommand
36339 */
36340- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36341+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36342 pinstance->num_hrrq;
36343
36344 if (request_size) {
fe2de317 36345@@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
8308f9c9
MT
36346
36347 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36348 /* add resources only after host is added into system */
36349- if (!atomic_read(&pinstance->expose_resources))
36350+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36351 return;
36352
36353 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
fe2de317 36354@@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instance(
8308f9c9
MT
36355 init_waitqueue_head(&pinstance->reset_wait_q);
36356
36357 atomic_set(&pinstance->outstanding_cmds, 0);
36358- atomic_set(&pinstance->last_message_id, 0);
36359- atomic_set(&pinstance->expose_resources, 0);
36360+ atomic_set_unchecked(&pinstance->last_message_id, 0);
36361+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36362
36363 INIT_LIST_HEAD(&pinstance->free_res_q);
36364 INIT_LIST_HEAD(&pinstance->used_res_q);
15a11c5b 36365@@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
8308f9c9
MT
36366 /* Schedule worker thread to handle CCN and take care of adding and
36367 * removing devices to OS
36368 */
36369- atomic_set(&pinstance->expose_resources, 1);
36370+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36371 schedule_work(&pinstance->worker_q);
36372 return rc;
36373
fe2de317
MT
36374diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36375index f920baf..4417389 100644
36376--- a/drivers/scsi/pmcraid.h
36377+++ b/drivers/scsi/pmcraid.h
15a11c5b 36378@@ -749,7 +749,7 @@ struct pmcraid_instance {
8308f9c9
MT
36379 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36380
36381 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36382- atomic_t last_message_id;
36383+ atomic_unchecked_t last_message_id;
36384
36385 /* configuration table */
36386 struct pmcraid_config_table *cfg_table;
15a11c5b 36387@@ -778,7 +778,7 @@ struct pmcraid_instance {
8308f9c9
MT
36388 atomic_t outstanding_cmds;
36389
36390 /* should add/delete resources to mid-layer now ?*/
36391- atomic_t expose_resources;
36392+ atomic_unchecked_t expose_resources;
36393
36394
36395
15a11c5b 36396@@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
8308f9c9
MT
36397 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36398 };
36399 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36400- atomic_t read_failures; /* count of failed READ commands */
36401- atomic_t write_failures; /* count of failed WRITE commands */
36402+ atomic_unchecked_t read_failures; /* count of failed READ commands */
36403+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36404
36405 /* To indicate add/delete/modify during CCN */
36406 u8 change_detected;
fe2de317
MT
36407diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36408index a03eaf4..a6b3fd9 100644
36409--- a/drivers/scsi/qla2xxx/qla_def.h
36410+++ b/drivers/scsi/qla2xxx/qla_def.h
15a11c5b
MT
36411@@ -2244,7 +2244,7 @@ struct isp_operations {
36412 int (*get_flash_version) (struct scsi_qla_host *, void *);
36413 int (*start_scsi) (srb_t *);
36414 int (*abort_isp) (struct scsi_qla_host *);
36415-};
36416+} __no_const;
36417
36418 /* MSI-X Support *************************************************************/
36419
fe2de317
MT
36420diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36421index 473c5c8..4e2f24a 100644
36422--- a/drivers/scsi/qla4xxx/ql4_def.h
36423+++ b/drivers/scsi/qla4xxx/ql4_def.h
8308f9c9
MT
36424@@ -256,7 +256,7 @@ struct ddb_entry {
36425 atomic_t retry_relogin_timer; /* Min Time between relogins
36426 * (4000 only) */
36427 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36428- atomic_t relogin_retry_count; /* Num of times relogin has been
36429+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36430 * retried */
36431
36432 uint16_t port;
fe2de317
MT
36433diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
36434index 42ed5db..0262f9e 100644
36435--- a/drivers/scsi/qla4xxx/ql4_init.c
36436+++ b/drivers/scsi/qla4xxx/ql4_init.c
36437@@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
8308f9c9
MT
36438 ddb_entry->fw_ddb_index = fw_ddb_index;
36439 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36440 atomic_set(&ddb_entry->relogin_timer, 0);
36441- atomic_set(&ddb_entry->relogin_retry_count, 0);
36442+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36443 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36444 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36445 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
fe2de317 36446@@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
66a7e928
MT
36447 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
36448 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
8308f9c9
MT
36449 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36450- atomic_set(&ddb_entry->relogin_retry_count, 0);
36451+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36452 atomic_set(&ddb_entry->relogin_timer, 0);
36453 clear_bit(DF_RELOGIN, &ddb_entry->flags);
66a7e928 36454 iscsi_unblock_session(ddb_entry->sess);
fe2de317
MT
36455diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36456index f2364ec..44c42b1 100644
36457--- a/drivers/scsi/qla4xxx/ql4_os.c
36458+++ b/drivers/scsi/qla4xxx/ql4_os.c
36459@@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
8308f9c9
MT
36460 ddb_entry->fw_ddb_device_state ==
36461 DDB_DS_SESSION_FAILED) {
36462 /* Reset retry relogin timer */
36463- atomic_inc(&ddb_entry->relogin_retry_count);
36464+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36465 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
36466 " timed out-retrying"
36467 " relogin (%d)\n",
36468 ha->host_no,
36469 ddb_entry->fw_ddb_index,
36470- atomic_read(&ddb_entry->
36471+ atomic_read_unchecked(&ddb_entry->
36472 relogin_retry_count))
36473 );
36474 start_dpc++;
fe2de317
MT
36475diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36476index 2aeb2e9..46e3925 100644
36477--- a/drivers/scsi/scsi.c
36478+++ b/drivers/scsi/scsi.c
36479@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
8308f9c9
MT
36480 unsigned long timeout;
36481 int rtn = 0;
36482
36483- atomic_inc(&cmd->device->iorequest_cnt);
36484+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36485
36486 /* check if the device is still usable */
36487 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
fe2de317
MT
36488diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
36489index 6888b2c..45befa1 100644
36490--- a/drivers/scsi/scsi_debug.c
36491+++ b/drivers/scsi/scsi_debug.c
36492@@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
66a7e928
MT
36493 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36494 unsigned char *cmd = (unsigned char *)scp->cmnd;
36495
36496+ pax_track_stack();
36497+
36498 if ((errsts = check_readiness(scp, 1, devip)))
36499 return errsts;
36500 memset(arr, 0, sizeof(arr));
fe2de317 36501@@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
66a7e928
MT
36502 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36503 unsigned char *cmd = (unsigned char *)scp->cmnd;
36504
36505+ pax_track_stack();
36506+
36507 if ((errsts = check_readiness(scp, 1, devip)))
36508 return errsts;
36509 memset(arr, 0, sizeof(arr));
fe2de317
MT
36510diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36511index 6d219e4..eb3ded3 100644
36512--- a/drivers/scsi/scsi_lib.c
36513+++ b/drivers/scsi/scsi_lib.c
36514@@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
8308f9c9
MT
36515 shost = sdev->host;
36516 scsi_init_cmd_errh(cmd);
36517 cmd->result = DID_NO_CONNECT << 16;
36518- atomic_inc(&cmd->device->iorequest_cnt);
36519+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36520
36521 /*
36522 * SCSI request completion path will do scsi_device_unbusy(),
fe2de317 36523@@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
8308f9c9
MT
36524
36525 INIT_LIST_HEAD(&cmd->eh_entry);
36526
36527- atomic_inc(&cmd->device->iodone_cnt);
36528+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
36529 if (cmd->result)
36530- atomic_inc(&cmd->device->ioerr_cnt);
36531+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36532
36533 disposition = scsi_decide_disposition(cmd);
36534 if (disposition != SUCCESS &&
fe2de317
MT
36535diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36536index e0bd3f7..816b8a6 100644
36537--- a/drivers/scsi/scsi_sysfs.c
36538+++ b/drivers/scsi/scsi_sysfs.c
36539@@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
8308f9c9
MT
36540 char *buf) \
36541 { \
36542 struct scsi_device *sdev = to_scsi_device(dev); \
36543- unsigned long long count = atomic_read(&sdev->field); \
36544+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
36545 return snprintf(buf, 20, "0x%llx\n", count); \
36546 } \
36547 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
fe2de317
MT
36548diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36549index 84a1fdf..693b0d6 100644
36550--- a/drivers/scsi/scsi_tgt_lib.c
36551+++ b/drivers/scsi/scsi_tgt_lib.c
36552@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
6e9df6a3
MT
36553 int err;
36554
36555 dprintk("%lx %u\n", uaddr, len);
36556- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36557+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36558 if (err) {
36559 /*
36560 * TODO: need to fixup sg_tablesize, max_segment_size,
fe2de317
MT
36561diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36562index 1b21491..1b7f60e 100644
36563--- a/drivers/scsi/scsi_transport_fc.c
36564+++ b/drivers/scsi/scsi_transport_fc.c
36565@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
8308f9c9
MT
36566 * Netlink Infrastructure
36567 */
36568
36569-static atomic_t fc_event_seq;
36570+static atomic_unchecked_t fc_event_seq;
36571
36572 /**
36573 * fc_get_event_number - Obtain the next sequential FC event number
15a11c5b 36574@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
8308f9c9
MT
36575 u32
36576 fc_get_event_number(void)
36577 {
36578- return atomic_add_return(1, &fc_event_seq);
36579+ return atomic_add_return_unchecked(1, &fc_event_seq);
36580 }
36581 EXPORT_SYMBOL(fc_get_event_number);
36582
fe2de317 36583@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
8308f9c9
MT
36584 {
36585 int error;
36586
36587- atomic_set(&fc_event_seq, 0);
36588+ atomic_set_unchecked(&fc_event_seq, 0);
36589
36590 error = transport_class_register(&fc_host_class);
36591 if (error)
fe2de317 36592@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
71d190be 36593 char *cp;
58c5fc13 36594
71d190be
MT
36595 *val = simple_strtoul(buf, &cp, 0);
36596- if ((*cp && (*cp != '\n')) || (*val < 0))
36597+ if (*cp && (*cp != '\n'))
36598 return -EINVAL;
36599 /*
36600 * Check for overflow; dev_loss_tmo is u32
fe2de317
MT
36601diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36602index 3fd16d7..ba0871f 100644
36603--- a/drivers/scsi/scsi_transport_iscsi.c
36604+++ b/drivers/scsi/scsi_transport_iscsi.c
8308f9c9
MT
36605@@ -83,7 +83,7 @@ struct iscsi_internal {
36606 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36607 };
36608
36609-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36610+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36611 static struct workqueue_struct *iscsi_eh_timer_workq;
36612
36613 /*
fe2de317 36614@@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
8308f9c9
MT
36615 int err;
36616
36617 ihost = shost->shost_data;
36618- session->sid = atomic_add_return(1, &iscsi_session_nr);
36619+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36620
36621 if (id == ISCSI_MAX_TARGET) {
36622 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
fe2de317 36623@@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(void)
8308f9c9
MT
36624 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36625 ISCSI_TRANSPORT_VERSION);
36626
36627- atomic_set(&iscsi_session_nr, 0);
36628+ atomic_set_unchecked(&iscsi_session_nr, 0);
36629
36630 err = class_register(&iscsi_transport_class);
36631 if (err)
fe2de317
MT
36632diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36633index 21a045e..ec89e03 100644
36634--- a/drivers/scsi/scsi_transport_srp.c
36635+++ b/drivers/scsi/scsi_transport_srp.c
8308f9c9
MT
36636@@ -33,7 +33,7 @@
36637 #include "scsi_transport_srp_internal.h"
36638
36639 struct srp_host_attrs {
36640- atomic_t next_port_id;
36641+ atomic_unchecked_t next_port_id;
36642 };
36643 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36644
fe2de317 36645@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
8308f9c9
MT
36646 struct Scsi_Host *shost = dev_to_shost(dev);
36647 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36648
36649- atomic_set(&srp_host->next_port_id, 0);
36650+ atomic_set_unchecked(&srp_host->next_port_id, 0);
36651 return 0;
36652 }
36653
fe2de317 36654@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
8308f9c9
MT
36655 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36656 rport->roles = ids->roles;
36657
36658- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36659+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36660 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36661
36662 transport_setup_device(&rport->dev);
fe2de317
MT
36663diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36664index 909ed9e..1ae290a 100644
36665--- a/drivers/scsi/sg.c
36666+++ b/drivers/scsi/sg.c
36667@@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
6e9df6a3
MT
36668 sdp->disk->disk_name,
36669 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36670 NULL,
36671- (char *)arg);
36672+ (char __user *)arg);
36673 case BLKTRACESTART:
36674 return blk_trace_startstop(sdp->device->request_queue, 1);
36675 case BLKTRACESTOP:
bc901d79 36676@@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
ae4e228f 36677 const struct file_operations * fops;
58c5fc13
MT
36678 };
36679
ae4e228f
MT
36680-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36681+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36682 {"allow_dio", &adio_fops},
36683 {"debug", &debug_fops},
36684 {"def_reserved_size", &dressz_fops},
36685@@ -2325,7 +2325,7 @@ sg_proc_init(void)
36686 {
36687 int k, mask;
36688 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36689- struct sg_proc_leaf * leaf;
36690+ const struct sg_proc_leaf * leaf;
36691
36692 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36693 if (!sg_proc_sgp)
36694diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
36695index b4543f5..e1b34b8 100644
36696--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
36697+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
36698@@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
36699 int do_iounmap = 0;
36700 int do_disable_device = 1;
36701
36702+ pax_track_stack();
36703+
36704 memset(&sym_dev, 0, sizeof(sym_dev));
36705 memset(&nvram, 0, sizeof(nvram));
36706 sym_dev.pdev = pdev;
36707diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
36708index a18996d..fe993cb 100644
36709--- a/drivers/scsi/vmw_pvscsi.c
36710+++ b/drivers/scsi/vmw_pvscsi.c
36711@@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
36712 dma_addr_t base;
36713 unsigned i;
36714
36715+ pax_track_stack();
36716+
36717 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
36718 cmd.reqRingNumPages = adapter->req_pages;
36719 cmd.cmpRingNumPages = adapter->cmp_pages;
36720diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
36721index c5f37f0..898d202 100644
36722--- a/drivers/spi/spi-dw-pci.c
36723+++ b/drivers/spi/spi-dw-pci.c
36724@@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pdev)
36725 #define spi_resume NULL
36726 #endif
36727
36728-static const struct pci_device_id pci_ids[] __devinitdata = {
36729+static const struct pci_device_id pci_ids[] __devinitconst = {
36730 /* Intel MID platform SPI controller 0 */
36731 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
36732 {},
36733diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36734index 4d1b9f5..8408fe3 100644
36735--- a/drivers/spi/spi.c
36736+++ b/drivers/spi/spi.c
36737@@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *master)
36738 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36739
36740 /* portable code must never pass more than 32 bytes */
36741-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36742+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36743
36744 static u8 *buf;
36745
36746diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36747index 32ee39a..3004c3d 100644
36748--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36749+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36750@@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM];
36751 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
36752
36753
36754-static struct net_device_ops ar6000_netdev_ops = {
36755+static net_device_ops_no_const ar6000_netdev_ops = {
36756 .ndo_init = NULL,
36757 .ndo_open = ar6000_open,
36758 .ndo_stop = ar6000_close,
36759diff --git a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36760index 39e0873..0925710 100644
36761--- a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36762+++ b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36763@@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void *pHciPalInfo, void *skb);
36764 typedef struct ar6k_pal_config_s
36765 {
36766 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
36767-}ar6k_pal_config_t;
36768+} __no_const ar6k_pal_config_t;
36769
36770 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
36771 #endif /* _AR6K_PAL_H_ */
36772diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36773index 05dada9..96171c6 100644
36774--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36775+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36776@@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if *ifp)
36777 free_netdev(ifp->net);
36778 }
36779 /* Allocate etherdev, including space for private structure */
36780- ifp->net = alloc_etherdev(sizeof(drvr_priv));
36781+ ifp->net = alloc_etherdev(sizeof(*drvr_priv));
36782 if (!ifp->net) {
36783 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
36784 ret = -ENOMEM;
36785 }
36786 if (ret == 0) {
36787 strcpy(ifp->net->name, ifp->name);
36788- memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
36789+ memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
36790 err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
36791 if (err != 0) {
36792 BRCMF_ERROR(("%s: brcmf_net_attach failed, "
36793@@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36794 BRCMF_TRACE(("%s: Enter\n", __func__));
36795
36796 /* Allocate etherdev, including space for private structure */
36797- net = alloc_etherdev(sizeof(drvr_priv));
36798+ net = alloc_etherdev(sizeof(*drvr_priv));
36799 if (!net) {
36800 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
36801 goto fail;
36802@@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36803 /*
36804 * Save the brcmf_info into the priv
36805 */
36806- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
36807+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
36808
36809 /* Set network interface name if it was provided as module parameter */
36810 if (iface_name[0]) {
36811@@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36812 /*
36813 * Save the brcmf_info into the priv
36814 */
36815- memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
36816+ memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
36817
36818 #if defined(CONFIG_PM_SLEEP)
36819 atomic_set(&brcmf_mmc_suspend, false);
36820diff --git a/drivers/staging/brcm80211/brcmfmac/sdio_host.h b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36821index d345472..cedb19e 100644
36822--- a/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36823+++ b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36824@@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
36825 u16 func, uint bustype, u32 regsva, void *param);
36826 /* detach from device */
36827 void (*detach) (void *ch);
36828-};
36829+} __no_const;
36830
36831 struct sdioh_info;
36832
36833diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36834index a01b01c..b3f721c 100644
36835--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36836+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36837@@ -591,7 +591,7 @@ struct phy_func_ptr {
36838 initfn_t carrsuppr;
36839 rxsigpwrfn_t rxsigpwr;
36840 detachfn_t detach;
36841-};
36842+} __no_const;
36843
36844 struct brcms_phy {
36845 struct brcms_phy_pub pubpi_ro;
36846diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
36847index 8fb3051..a8b6c67 100644
36848--- a/drivers/staging/et131x/et1310_tx.c
36849+++ b/drivers/staging/et131x/et1310_tx.c
36850@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
36851 struct net_device_stats *stats = &etdev->net_stats;
36852
36853 if (tcb->flags & fMP_DEST_BROAD)
36854- atomic_inc(&etdev->stats.brdcstxmt);
36855+ atomic_inc_unchecked(&etdev->stats.brdcstxmt);
36856 else if (tcb->flags & fMP_DEST_MULTI)
36857- atomic_inc(&etdev->stats.multixmt);
36858+ atomic_inc_unchecked(&etdev->stats.multixmt);
36859 else
36860- atomic_inc(&etdev->stats.unixmt);
36861+ atomic_inc_unchecked(&etdev->stats.unixmt);
36862
36863 if (tcb->skb) {
36864 stats->tx_bytes += tcb->skb->len;
36865diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
36866index 408c50b..fd65e9f 100644
36867--- a/drivers/staging/et131x/et131x_adapter.h
36868+++ b/drivers/staging/et131x/et131x_adapter.h
36869@@ -106,11 +106,11 @@ struct ce_stats {
36870 * operations
36871 */
36872 u32 unircv; /* # multicast packets received */
36873- atomic_t unixmt; /* # multicast packets for Tx */
36874+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
36875 u32 multircv; /* # multicast packets received */
36876- atomic_t multixmt; /* # multicast packets for Tx */
36877+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
36878 u32 brdcstrcv; /* # broadcast packets received */
36879- atomic_t brdcstxmt; /* # broadcast packets for Tx */
36880+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
36881 u32 norcvbuf; /* # Rx packets discarded */
36882 u32 noxmtbuf; /* # Tx packets discarded */
36883
36884diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
36885index 455f47a..86205ff 100644
36886--- a/drivers/staging/hv/channel.c
36887+++ b/drivers/staging/hv/channel.c
36888@@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36889 int ret = 0;
36890 int t;
36891
36892- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36893- atomic_inc(&vmbus_connection.next_gpadl_handle);
36894+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36895+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36896
36897 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36898 if (ret)
36899diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c
36900index 824f816..a800af7 100644
36901--- a/drivers/staging/hv/hv.c
36902+++ b/drivers/staging/hv/hv.c
36903@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36904 u64 output_address = (output) ? virt_to_phys(output) : 0;
36905 u32 output_address_hi = output_address >> 32;
36906 u32 output_address_lo = output_address & 0xFFFFFFFF;
36907- volatile void *hypercall_page = hv_context.hypercall_page;
36908+ volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36909
36910 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36911 "=a"(hv_status_lo) : "d" (control_hi),
36912diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/staging/hv/hv_mouse.c
36913index d957fc2..43cedd9 100644
36914--- a/drivers/staging/hv/hv_mouse.c
36915+++ b/drivers/staging/hv/hv_mouse.c
36916@@ -878,8 +878,10 @@ static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
36917 if (hid_dev) {
36918 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
36919
36920- hid_dev->ll_driver->open = mousevsc_hid_open;
36921- hid_dev->ll_driver->close = mousevsc_hid_close;
36922+ pax_open_kernel();
36923+ *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
36924+ *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
36925+ pax_close_kernel();
36926
36927 hid_dev->bus = BUS_VIRTUAL;
36928 hid_dev->vendor = input_device_ctx->device_info.vendor;
36929diff --git a/drivers/staging/hv/hyperv_vmbus.h b/drivers/staging/hv/hyperv_vmbus.h
36930index 349ad80..3f75719 100644
36931--- a/drivers/staging/hv/hyperv_vmbus.h
36932+++ b/drivers/staging/hv/hyperv_vmbus.h
36933@@ -559,7 +559,7 @@ enum vmbus_connect_state {
36934 struct vmbus_connection {
36935 enum vmbus_connect_state conn_state;
36936
36937- atomic_t next_gpadl_handle;
36938+ atomic_unchecked_t next_gpadl_handle;
36939
36940 /*
36941 * Represents channel interrupts. Each bit position represents a
36942diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
36943index dbb5201..d6047c6 100644
36944--- a/drivers/staging/hv/rndis_filter.c
36945+++ b/drivers/staging/hv/rndis_filter.c
36946@@ -43,7 +43,7 @@ struct rndis_device {
36947
36948 enum rndis_device_state state;
36949 u32 link_stat;
36950- atomic_t new_req_id;
36951+ atomic_unchecked_t new_req_id;
36952
36953 spinlock_t request_lock;
36954 struct list_head req_list;
36955@@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36956 * template
36957 */
36958 set = &rndis_msg->msg.set_req;
36959- set->req_id = atomic_inc_return(&dev->new_req_id);
36960+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36961
36962 /* Add to the request list */
36963 spin_lock_irqsave(&dev->request_lock, flags);
36964@@ -622,7 +622,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36965
36966 /* Setup the rndis set */
36967 halt = &request->request_msg.msg.halt_req;
36968- halt->req_id = atomic_inc_return(&dev->new_req_id);
36969+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36970
36971 /* Ignore return since this msg is optional. */
36972 rndis_filter_send_request(dev, request);
36973diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
36974index 1c949f5..7a8b104 100644
36975--- a/drivers/staging/hv/vmbus_drv.c
36976+++ b/drivers/staging/hv/vmbus_drv.c
36977@@ -660,11 +660,11 @@ int vmbus_child_device_register(struct hv_device *child_device_obj)
36978 {
36979 int ret = 0;
36980
36981- static atomic_t device_num = ATOMIC_INIT(0);
36982+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36983
36984 /* Set the device name. Otherwise, device_register() will fail. */
36985 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36986- atomic_inc_return(&device_num));
36987+ atomic_inc_return_unchecked(&device_num));
36988
36989 /* The new device belongs to this bus */
36990 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
36991diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
36992index 3f26f71..fb5c787 100644
36993--- a/drivers/staging/iio/ring_generic.h
36994+++ b/drivers/staging/iio/ring_generic.h
36995@@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
36996
36997 int (*is_enabled)(struct iio_ring_buffer *ring);
36998 int (*enable)(struct iio_ring_buffer *ring);
36999-};
37000+} __no_const;
37001
37002 struct iio_ring_setup_ops {
37003 int (*preenable)(struct iio_dev *);
37004diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
37005index cfec92d..a65dacf 100644
37006--- a/drivers/staging/mei/interface.c
37007+++ b/drivers/staging/mei/interface.c
37008@@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
37009 mei_hdr->reserved = 0;
37010
37011 mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
37012- memset(mei_flow_control, 0, sizeof(mei_flow_control));
37013+ memset(mei_flow_control, 0, sizeof(*mei_flow_control));
37014 mei_flow_control->host_addr = cl->host_client_id;
37015 mei_flow_control->me_addr = cl->me_client_id;
37016 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
37017@@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
37018
37019 mei_cli_disconnect =
37020 (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
37021- memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
37022+ memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
37023 mei_cli_disconnect->host_addr = cl->host_client_id;
37024 mei_cli_disconnect->me_addr = cl->me_client_id;
37025 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
37026diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37027index 8b307b4..a97ac91 100644
37028--- a/drivers/staging/octeon/ethernet-rx.c
37029+++ b/drivers/staging/octeon/ethernet-rx.c
37030@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37031 /* Increment RX stats for virtual ports */
37032 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37033 #ifdef CONFIG_64BIT
37034- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37035- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37036+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37037+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37038 #else
37039- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37040- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37041+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37042+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37043 #endif
37044 }
37045 netif_receive_skb(skb);
37046@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37047 dev->name);
37048 */
37049 #ifdef CONFIG_64BIT
37050- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37051+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37052 #else
37053- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37054+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37055 #endif
37056 dev_kfree_skb_irq(skb);
37057 }
37058diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37059index a8f780e..aef1098 100644
37060--- a/drivers/staging/octeon/ethernet.c
37061+++ b/drivers/staging/octeon/ethernet.c
37062@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37063 * since the RX tasklet also increments it.
37064 */
37065 #ifdef CONFIG_64BIT
37066- atomic64_add(rx_status.dropped_packets,
37067- (atomic64_t *)&priv->stats.rx_dropped);
37068+ atomic64_add_unchecked(rx_status.dropped_packets,
37069+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37070 #else
37071- atomic_add(rx_status.dropped_packets,
37072- (atomic_t *)&priv->stats.rx_dropped);
37073+ atomic_add_unchecked(rx_status.dropped_packets,
37074+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37075 #endif
37076 }
37077
37078diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
37079index f3c6060..56bf826 100644
37080--- a/drivers/staging/pohmelfs/inode.c
37081+++ b/drivers/staging/pohmelfs/inode.c
37082@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
37083 mutex_init(&psb->mcache_lock);
37084 psb->mcache_root = RB_ROOT;
37085 psb->mcache_timeout = msecs_to_jiffies(5000);
37086- atomic_long_set(&psb->mcache_gen, 0);
37087+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
37088
37089 psb->trans_max_pages = 100;
37090
37091@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
37092 INIT_LIST_HEAD(&psb->crypto_ready_list);
37093 INIT_LIST_HEAD(&psb->crypto_active_list);
37094
37095- atomic_set(&psb->trans_gen, 1);
37096+ atomic_set_unchecked(&psb->trans_gen, 1);
37097 atomic_long_set(&psb->total_inodes, 0);
37098
37099 mutex_init(&psb->state_lock);
37100diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
37101index e22665c..a2a9390 100644
37102--- a/drivers/staging/pohmelfs/mcache.c
37103+++ b/drivers/staging/pohmelfs/mcache.c
37104@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
37105 m->data = data;
37106 m->start = start;
37107 m->size = size;
37108- m->gen = atomic_long_inc_return(&psb->mcache_gen);
37109+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37110
37111 mutex_lock(&psb->mcache_lock);
37112 err = pohmelfs_mcache_insert(psb, m);
37113diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
37114index 985b6b7..7699e05 100644
37115--- a/drivers/staging/pohmelfs/netfs.h
37116+++ b/drivers/staging/pohmelfs/netfs.h
37117@@ -571,14 +571,14 @@ struct pohmelfs_config;
37118 struct pohmelfs_sb {
37119 struct rb_root mcache_root;
37120 struct mutex mcache_lock;
37121- atomic_long_t mcache_gen;
37122+ atomic_long_unchecked_t mcache_gen;
37123 unsigned long mcache_timeout;
37124
37125 unsigned int idx;
37126
37127 unsigned int trans_retries;
37128
37129- atomic_t trans_gen;
37130+ atomic_unchecked_t trans_gen;
37131
37132 unsigned int crypto_attached_size;
37133 unsigned int crypto_align_size;
37134diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
37135index 36a2535..0591bf4 100644
37136--- a/drivers/staging/pohmelfs/trans.c
37137+++ b/drivers/staging/pohmelfs/trans.c
37138@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
37139 int err;
37140 struct netfs_cmd *cmd = t->iovec.iov_base;
37141
37142- t->gen = atomic_inc_return(&psb->trans_gen);
37143+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37144
37145 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37146 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37147diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37148index b70cb2b..4db41a7 100644
37149--- a/drivers/staging/rtl8712/rtl871x_io.h
37150+++ b/drivers/staging/rtl8712/rtl871x_io.h
37151@@ -83,7 +83,7 @@ struct _io_ops {
37152 u8 *pmem);
37153 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37154 u8 *pmem);
37155-};
37156+} __no_const;
37157
37158 struct io_req {
37159 struct list_head list;
37160diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37161index c7b5e8b..783d6cb 100644
37162--- a/drivers/staging/sbe-2t3e3/netdev.c
37163+++ b/drivers/staging/sbe-2t3e3/netdev.c
37164@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37165 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37166
37167 if (rlen)
37168- if (copy_to_user(data, &resp, rlen))
37169+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37170 return -EFAULT;
37171
37172 return 0;
37173diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37174index be21617..0954e45 100644
37175--- a/drivers/staging/usbip/usbip_common.h
37176+++ b/drivers/staging/usbip/usbip_common.h
37177@@ -289,7 +289,7 @@ struct usbip_device {
37178 void (*shutdown)(struct usbip_device *);
37179 void (*reset)(struct usbip_device *);
37180 void (*unusable)(struct usbip_device *);
37181- } eh_ops;
37182+ } __no_const eh_ops;
37183 };
37184
37185 #if 0
37186diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37187index 71a586e..4d8a91a 100644
37188--- a/drivers/staging/usbip/vhci.h
37189+++ b/drivers/staging/usbip/vhci.h
37190@@ -85,7 +85,7 @@ struct vhci_hcd {
37191 unsigned resuming:1;
37192 unsigned long re_timeout;
37193
37194- atomic_t seqnum;
37195+ atomic_unchecked_t seqnum;
37196
37197 /*
37198 * NOTE:
37199diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37200index 2ee97e2..0420b86 100644
37201--- a/drivers/staging/usbip/vhci_hcd.c
37202+++ b/drivers/staging/usbip/vhci_hcd.c
37203@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
37204 return;
37205 }
37206
37207- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37208+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37209 if (priv->seqnum == 0xffff)
37210 dev_info(&urb->dev->dev, "seqnum max\n");
37211
37212@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37213 return -ENOMEM;
37214 }
37215
37216- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37217+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37218 if (unlink->seqnum == 0xffff)
37219 pr_info("seqnum max\n");
37220
37221@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
37222 vdev->rhport = rhport;
37223 }
37224
37225- atomic_set(&vhci->seqnum, 0);
37226+ atomic_set_unchecked(&vhci->seqnum, 0);
37227 spin_lock_init(&vhci->lock);
37228
37229 hcd->power_budget = 0; /* no limit */
37230diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37231index 3872b8c..fe6d2f4 100644
37232--- a/drivers/staging/usbip/vhci_rx.c
37233+++ b/drivers/staging/usbip/vhci_rx.c
37234@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37235 if (!urb) {
37236 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37237 pr_info("max seqnum %d\n",
37238- atomic_read(&the_controller->seqnum));
37239+ atomic_read_unchecked(&the_controller->seqnum));
37240 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37241 return;
37242 }
37243diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37244index 7735027..30eed13 100644
37245--- a/drivers/staging/vt6655/hostap.c
37246+++ b/drivers/staging/vt6655/hostap.c
37247@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37248 *
37249 */
37250
37251+static net_device_ops_no_const apdev_netdev_ops;
37252+
37253 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37254 {
37255 PSDevice apdev_priv;
37256 struct net_device *dev = pDevice->dev;
37257 int ret;
37258- const struct net_device_ops apdev_netdev_ops = {
37259- .ndo_start_xmit = pDevice->tx_80211,
37260- };
37261
37262 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37263
37264@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37265 *apdev_priv = *pDevice;
37266 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37267
37268+ /* only half broken now */
37269+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37270 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37271
37272 pDevice->apdev->type = ARPHRD_IEEE80211;
37273diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37274index 51b5adf..098e320 100644
37275--- a/drivers/staging/vt6656/hostap.c
37276+++ b/drivers/staging/vt6656/hostap.c
37277@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37278 *
37279 */
37280
37281+static net_device_ops_no_const apdev_netdev_ops;
37282+
37283 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37284 {
37285 PSDevice apdev_priv;
37286 struct net_device *dev = pDevice->dev;
37287 int ret;
37288- const struct net_device_ops apdev_netdev_ops = {
37289- .ndo_start_xmit = pDevice->tx_80211,
37290- };
37291
37292 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37293
37294@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37295 *apdev_priv = *pDevice;
37296 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37297
37298+ /* only half broken now */
37299+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37300 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37301
37302 pDevice->apdev->type = ARPHRD_IEEE80211;
37303diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37304index 7843dfd..3db105f 100644
37305--- a/drivers/staging/wlan-ng/hfa384x_usb.c
37306+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37307@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37308
37309 struct usbctlx_completor {
37310 int (*complete) (struct usbctlx_completor *);
37311-};
37312+} __no_const;
37313
37314 static int
37315 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37316diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37317index 1ca66ea..76f1343 100644
37318--- a/drivers/staging/zcache/tmem.c
37319+++ b/drivers/staging/zcache/tmem.c
37320@@ -39,7 +39,7 @@
37321 * A tmem host implementation must use this function to register callbacks
37322 * for memory allocation.
37323 */
37324-static struct tmem_hostops tmem_hostops;
37325+static tmem_hostops_no_const tmem_hostops;
37326
37327 static void tmem_objnode_tree_init(void);
37328
37329@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37330 * A tmem host implementation must use this function to register
37331 * callbacks for a page-accessible memory (PAM) implementation
37332 */
37333-static struct tmem_pamops tmem_pamops;
37334+static tmem_pamops_no_const tmem_pamops;
37335
37336 void tmem_register_pamops(struct tmem_pamops *m)
37337 {
37338diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37339index ed147c4..94fc3c6 100644
37340--- a/drivers/staging/zcache/tmem.h
37341+++ b/drivers/staging/zcache/tmem.h
37342@@ -180,6 +180,7 @@ struct tmem_pamops {
37343 void (*new_obj)(struct tmem_obj *);
37344 int (*replace_in_obj)(void *, struct tmem_obj *);
37345 };
37346+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37347 extern void tmem_register_pamops(struct tmem_pamops *m);
37348
37349 /* memory allocation methods provided by the host implementation */
37350@@ -189,6 +190,7 @@ struct tmem_hostops {
37351 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37352 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37353 };
37354+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37355 extern void tmem_register_hostops(struct tmem_hostops *m);
37356
37357 /* core tmem accessor functions */
37358diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
37359index 26a5d8b..74434f8 100644
37360--- a/drivers/target/iscsi/iscsi_target.c
37361+++ b/drivers/target/iscsi/iscsi_target.c
37362@@ -1368,7 +1368,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
37363 * outstanding_r2ts reaches zero, go ahead and send the delayed
37364 * TASK_ABORTED status.
37365 */
37366- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
37367+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
37368 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
37369 if (--cmd->outstanding_r2ts < 1) {
37370 iscsit_stop_dataout_timer(cmd);
37371diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
37372index 8badcb4..94c9ac6 100644
37373--- a/drivers/target/target_core_alua.c
37374+++ b/drivers/target/target_core_alua.c
37375@@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_metadata(
37376 char path[ALUA_METADATA_PATH_LEN];
37377 int len;
37378
37379+ pax_track_stack();
37380+
37381 memset(path, 0, ALUA_METADATA_PATH_LEN);
37382
37383 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
37384@@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondary_metadata(
37385 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
37386 int len;
37387
37388+ pax_track_stack();
37389+
37390 memset(path, 0, ALUA_METADATA_PATH_LEN);
37391 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
37392
37393diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
37394index f04d4ef..7de212b 100644
37395--- a/drivers/target/target_core_cdb.c
37396+++ b/drivers/target/target_core_cdb.c
37397@@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
37398 int length = 0;
37399 unsigned char buf[SE_MODE_PAGE_BUF];
37400
37401+ pax_track_stack();
37402+
37403 memset(buf, 0, SE_MODE_PAGE_BUF);
37404
37405 switch (cdb[2] & 0x3f) {
37406diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
37407index b2575d8..b6b28fd 100644
37408--- a/drivers/target/target_core_configfs.c
37409+++ b/drivers/target/target_core_configfs.c
37410@@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
37411 ssize_t len = 0;
37412 int reg_count = 0, prf_isid;
37413
37414+ pax_track_stack();
37415+
37416 if (!su_dev->se_dev_ptr)
37417 return -ENODEV;
37418
37419diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
37420index 7fd3a16..bc2fb3e 100644
37421--- a/drivers/target/target_core_pr.c
37422+++ b/drivers/target/target_core_pr.c
37423@@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_registration(
37424 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
37425 u16 tpgt;
37426
37427+ pax_track_stack();
37428+
37429 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
37430 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
37431 /*
37432@@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf(
37433 ssize_t len = 0;
37434 int reg_count = 0;
37435
37436+ pax_track_stack();
37437+
37438 memset(buf, 0, pr_aptpl_buf_len);
37439 /*
37440 * Called to clear metadata once APTPL has been deactivated.
37441@@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_file(
37442 char path[512];
37443 int ret;
37444
37445+ pax_track_stack();
37446+
37447 memset(iov, 0, sizeof(struct iovec));
37448 memset(path, 0, 512);
37449
37450diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37451index 5c1b8c5..0cb7d0e 100644
37452--- a/drivers/target/target_core_tmr.c
37453+++ b/drivers/target/target_core_tmr.c
37454@@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
37455 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37456 cmd->t_task_list_num,
37457 atomic_read(&cmd->t_task_cdbs_left),
37458- atomic_read(&cmd->t_task_cdbs_sent),
37459+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37460 atomic_read(&cmd->t_transport_active),
37461 atomic_read(&cmd->t_transport_stop),
37462 atomic_read(&cmd->t_transport_sent));
37463@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
37464 pr_debug("LUN_RESET: got t_transport_active = 1 for"
37465 " task: %p, t_fe_count: %d dev: %p\n", task,
37466 fe_count, dev);
37467- atomic_set(&cmd->t_transport_aborted, 1);
37468+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37469 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37470
8308f9c9 37471 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
6e9df6a3 37472@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
8308f9c9 37473 }
6e9df6a3 37474 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
8308f9c9 37475 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
6e9df6a3
MT
37476- atomic_set(&cmd->t_transport_aborted, 1);
37477+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37478 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8308f9c9 37479
6e9df6a3 37480 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
fe2de317
MT
37481diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37482index 013c100..8fd2e57 100644
37483--- a/drivers/target/target_core_transport.c
37484+++ b/drivers/target/target_core_transport.c
37485@@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_to_core_hba(
37486
37487 dev->queue_depth = dev_limits->queue_depth;
37488 atomic_set(&dev->depth_left, dev->queue_depth);
37489- atomic_set(&dev->dev_ordered_id, 0);
37490+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
37491
37492 se_dev_set_default_attribs(dev, dev_limits);
37493
37494@@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37495 * Used to determine when ORDERED commands should go from
37496 * Dormant to Active status.
37497 */
37498- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37499+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37500 smp_mb__after_atomic_inc();
37501 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37502 cmd->se_ordered_id, cmd->sam_task_attr,
37503@@ -1960,7 +1960,7 @@ static void transport_generic_request_failure(
37504 " t_transport_active: %d t_transport_stop: %d"
37505 " t_transport_sent: %d\n", cmd->t_task_list_num,
37506 atomic_read(&cmd->t_task_cdbs_left),
37507- atomic_read(&cmd->t_task_cdbs_sent),
37508+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37509 atomic_read(&cmd->t_task_cdbs_ex_left),
37510 atomic_read(&cmd->t_transport_active),
37511 atomic_read(&cmd->t_transport_stop),
37512@@ -2460,9 +2460,9 @@ check_depth:
37513 spin_lock_irqsave(&cmd->t_state_lock, flags);
37514 atomic_set(&task->task_active, 1);
37515 atomic_set(&task->task_sent, 1);
37516- atomic_inc(&cmd->t_task_cdbs_sent);
37517+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37518
37519- if (atomic_read(&cmd->t_task_cdbs_sent) ==
37520+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37521 cmd->t_task_list_num)
37522 atomic_set(&cmd->transport_sent, 1);
37523
37524@@ -4665,7 +4665,7 @@ static void transport_generic_wait_for_tasks(
37525 atomic_set(&cmd->transport_lun_stop, 0);
37526 }
37527 if (!atomic_read(&cmd->t_transport_active) ||
37528- atomic_read(&cmd->t_transport_aborted))
37529+ atomic_read_unchecked(&cmd->t_transport_aborted))
37530 goto remove;
37531
37532 atomic_set(&cmd->t_transport_stop, 1);
37533@@ -4900,7 +4900,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
37534 {
37535 int ret = 0;
37536
37537- if (atomic_read(&cmd->t_transport_aborted) != 0) {
37538+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
37539 if (!send_status ||
37540 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
37541 return 1;
37542@@ -4937,7 +4937,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
37543 */
37544 if (cmd->data_direction == DMA_TO_DEVICE) {
37545 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
37546- atomic_inc(&cmd->t_transport_aborted);
37547+ atomic_inc_unchecked(&cmd->t_transport_aborted);
37548 smp_mb__after_atomic_inc();
37549 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
37550 transport_new_cmd_failure(cmd);
37551@@ -5051,7 +5051,7 @@ static void transport_processing_shutdown(struct se_device *dev)
37552 cmd->se_tfo->get_task_tag(cmd),
37553 cmd->t_task_list_num,
37554 atomic_read(&cmd->t_task_cdbs_left),
37555- atomic_read(&cmd->t_task_cdbs_sent),
37556+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37557 atomic_read(&cmd->t_transport_active),
37558 atomic_read(&cmd->t_transport_stop),
37559 atomic_read(&cmd->t_transport_sent));
37560diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
37561index d5f923b..9c78228 100644
37562--- a/drivers/telephony/ixj.c
37563+++ b/drivers/telephony/ixj.c
37564@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37565 bool mContinue;
37566 char *pIn, *pOut;
37567
37568+ pax_track_stack();
37569+
37570 if (!SCI_Prepare(j))
37571 return 0;
37572
37573diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37574index 4c8b665..1d931eb 100644
37575--- a/drivers/tty/hvc/hvcs.c
37576+++ b/drivers/tty/hvc/hvcs.c
37577@@ -83,6 +83,7 @@
37578 #include <asm/hvcserver.h>
37579 #include <asm/uaccess.h>
37580 #include <asm/vio.h>
37581+#include <asm/local.h>
37582
37583 /*
37584 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37585@@ -270,7 +271,7 @@ struct hvcs_struct {
37586 unsigned int index;
37587
37588 struct tty_struct *tty;
37589- int open_count;
37590+ local_t open_count;
37591
37592 /*
37593 * Used to tell the driver kernel_thread what operations need to take
37594@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37595
37596 spin_lock_irqsave(&hvcsd->lock, flags);
37597
37598- if (hvcsd->open_count > 0) {
37599+ if (local_read(&hvcsd->open_count) > 0) {
37600 spin_unlock_irqrestore(&hvcsd->lock, flags);
37601 printk(KERN_INFO "HVCS: vterm state unchanged. "
37602 "The hvcs device node is still in use.\n");
37603@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37604 if ((retval = hvcs_partner_connect(hvcsd)))
37605 goto error_release;
37606
37607- hvcsd->open_count = 1;
37608+ local_set(&hvcsd->open_count, 1);
37609 hvcsd->tty = tty;
37610 tty->driver_data = hvcsd;
37611
37612@@ -1179,7 +1180,7 @@ fast_open:
37613
37614 spin_lock_irqsave(&hvcsd->lock, flags);
37615 kref_get(&hvcsd->kref);
37616- hvcsd->open_count++;
37617+ local_inc(&hvcsd->open_count);
37618 hvcsd->todo_mask |= HVCS_SCHED_READ;
37619 spin_unlock_irqrestore(&hvcsd->lock, flags);
37620
37621@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37622 hvcsd = tty->driver_data;
37623
37624 spin_lock_irqsave(&hvcsd->lock, flags);
37625- if (--hvcsd->open_count == 0) {
37626+ if (local_dec_and_test(&hvcsd->open_count)) {
37627
37628 vio_disable_interrupts(hvcsd->vdev);
37629
37630@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37631 free_irq(irq, hvcsd);
37632 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37633 return;
37634- } else if (hvcsd->open_count < 0) {
37635+ } else if (local_read(&hvcsd->open_count) < 0) {
37636 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37637 " is missmanaged.\n",
37638- hvcsd->vdev->unit_address, hvcsd->open_count);
37639+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37640 }
37641
37642 spin_unlock_irqrestore(&hvcsd->lock, flags);
37643@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37644
37645 spin_lock_irqsave(&hvcsd->lock, flags);
37646 /* Preserve this so that we know how many kref refs to put */
37647- temp_open_count = hvcsd->open_count;
37648+ temp_open_count = local_read(&hvcsd->open_count);
37649
37650 /*
37651 * Don't kref put inside the spinlock because the destruction
37652@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37653 hvcsd->tty->driver_data = NULL;
37654 hvcsd->tty = NULL;
37655
37656- hvcsd->open_count = 0;
37657+ local_set(&hvcsd->open_count, 0);
37658
37659 /* This will drop any buffered data on the floor which is OK in a hangup
37660 * scenario. */
37661@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37662 * the middle of a write operation? This is a crummy place to do this
37663 * but we want to keep it all in the spinlock.
37664 */
37665- if (hvcsd->open_count <= 0) {
37666+ if (local_read(&hvcsd->open_count) <= 0) {
37667 spin_unlock_irqrestore(&hvcsd->lock, flags);
37668 return -ENODEV;
37669 }
37670@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37671 {
37672 struct hvcs_struct *hvcsd = tty->driver_data;
37673
37674- if (!hvcsd || hvcsd->open_count <= 0)
37675+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37676 return 0;
37677
37678 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37679diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37680index ef92869..f4ebd88 100644
37681--- a/drivers/tty/ipwireless/tty.c
37682+++ b/drivers/tty/ipwireless/tty.c
37683@@ -29,6 +29,7 @@
37684 #include <linux/tty_driver.h>
37685 #include <linux/tty_flip.h>
37686 #include <linux/uaccess.h>
37687+#include <asm/local.h>
37688
37689 #include "tty.h"
37690 #include "network.h"
37691@@ -51,7 +52,7 @@ struct ipw_tty {
37692 int tty_type;
37693 struct ipw_network *network;
37694 struct tty_struct *linux_tty;
37695- int open_count;
37696+ local_t open_count;
37697 unsigned int control_lines;
37698 struct mutex ipw_tty_mutex;
37699 int tx_bytes_queued;
37700@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37701 mutex_unlock(&tty->ipw_tty_mutex);
37702 return -ENODEV;
37703 }
37704- if (tty->open_count == 0)
37705+ if (local_read(&tty->open_count) == 0)
37706 tty->tx_bytes_queued = 0;
37707
37708- tty->open_count++;
37709+ local_inc(&tty->open_count);
37710
37711 tty->linux_tty = linux_tty;
37712 linux_tty->driver_data = tty;
37713@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37714
37715 static void do_ipw_close(struct ipw_tty *tty)
37716 {
37717- tty->open_count--;
37718-
37719- if (tty->open_count == 0) {
37720+ if (local_dec_return(&tty->open_count) == 0) {
37721 struct tty_struct *linux_tty = tty->linux_tty;
37722
37723 if (linux_tty != NULL) {
37724@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37725 return;
37726
37727 mutex_lock(&tty->ipw_tty_mutex);
37728- if (tty->open_count == 0) {
37729+ if (local_read(&tty->open_count) == 0) {
37730 mutex_unlock(&tty->ipw_tty_mutex);
37731 return;
37732 }
37733@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37734 return;
37735 }
37736
37737- if (!tty->open_count) {
37738+ if (!local_read(&tty->open_count)) {
37739 mutex_unlock(&tty->ipw_tty_mutex);
37740 return;
37741 }
37742@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37743 return -ENODEV;
37744
37745 mutex_lock(&tty->ipw_tty_mutex);
37746- if (!tty->open_count) {
37747+ if (!local_read(&tty->open_count)) {
37748 mutex_unlock(&tty->ipw_tty_mutex);
37749 return -EINVAL;
37750 }
37751@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37752 if (!tty)
37753 return -ENODEV;
37754
37755- if (!tty->open_count)
37756+ if (!local_read(&tty->open_count))
37757 return -EINVAL;
37758
37759 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37760@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37761 if (!tty)
37762 return 0;
37763
37764- if (!tty->open_count)
37765+ if (!local_read(&tty->open_count))
37766 return 0;
37767
37768 return tty->tx_bytes_queued;
37769@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37770 if (!tty)
37771 return -ENODEV;
37772
37773- if (!tty->open_count)
37774+ if (!local_read(&tty->open_count))
37775 return -EINVAL;
37776
37777 return get_control_lines(tty);
37778@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37779 if (!tty)
37780 return -ENODEV;
37781
37782- if (!tty->open_count)
37783+ if (!local_read(&tty->open_count))
37784 return -EINVAL;
37785
37786 return set_control_lines(tty, set, clear);
37787@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37788 if (!tty)
37789 return -ENODEV;
37790
37791- if (!tty->open_count)
37792+ if (!local_read(&tty->open_count))
37793 return -EINVAL;
37794
37795 /* FIXME: Exactly how is the tty object locked here .. */
37796@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37797 against a parallel ioctl etc */
37798 mutex_lock(&ttyj->ipw_tty_mutex);
37799 }
37800- while (ttyj->open_count)
37801+ while (local_read(&ttyj->open_count))
37802 do_ipw_close(ttyj);
37803 ipwireless_disassociate_network_ttys(network,
37804 ttyj->channel_idx);
37805diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37806index 8a50e4e..7d9ca3d 100644
37807--- a/drivers/tty/n_gsm.c
37808+++ b/drivers/tty/n_gsm.c
37809@@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37810 kref_init(&dlci->ref);
37811 mutex_init(&dlci->mutex);
37812 dlci->fifo = &dlci->_fifo;
37813- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37814+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37815 kfree(dlci);
37816 return NULL;
37817 }
37818diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37819index 39d6ab6..eb97f41 100644
37820--- a/drivers/tty/n_tty.c
37821+++ b/drivers/tty/n_tty.c
37822@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37823 {
37824 *ops = tty_ldisc_N_TTY;
37825 ops->owner = NULL;
37826- ops->refcount = ops->flags = 0;
37827+ atomic_set(&ops->refcount, 0);
37828+ ops->flags = 0;
37829 }
37830 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37831diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37832index e18604b..a7d5a11 100644
37833--- a/drivers/tty/pty.c
37834+++ b/drivers/tty/pty.c
6e9df6a3 37835@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
37836 register_sysctl_table(pty_root_table);
37837
37838 /* Now create the /dev/ptmx special device */
37839+ pax_open_kernel();
37840 tty_default_fops(&ptmx_fops);
37841- ptmx_fops.open = ptmx_open;
37842+ *(void **)&ptmx_fops.open = ptmx_open;
37843+ pax_close_kernel();
37844
37845 cdev_init(&ptmx_cdev, &ptmx_fops);
37846 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37847diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
37848index 6a1241c..d04ab0d 100644
37849--- a/drivers/tty/rocket.c
37850+++ b/drivers/tty/rocket.c
37851@@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
37852 struct rocket_ports tmp;
37853 int board;
37854
37855+ pax_track_stack();
37856+
37857 if (!retports)
37858 return -EFAULT;
37859 memset(&tmp, 0, sizeof (tmp));
37860diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37861index 87e7e6c..89744e0 100644
37862--- a/drivers/tty/serial/kgdboc.c
37863+++ b/drivers/tty/serial/kgdboc.c
37864@@ -23,8 +23,9 @@
37865 #define MAX_CONFIG_LEN 40
37866
37867 static struct kgdb_io kgdboc_io_ops;
37868+static struct kgdb_io kgdboc_io_ops_console;
37869
37870-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37871+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37872 static int configured = -1;
37873
37874 static char config[MAX_CONFIG_LEN];
37875@@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
37876 kgdboc_unregister_kbd();
37877 if (configured == 1)
37878 kgdb_unregister_io_module(&kgdboc_io_ops);
37879+ else if (configured == 2)
37880+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
37881 }
37882
37883 static int configure_kgdboc(void)
37884@@ -156,13 +159,13 @@ static int configure_kgdboc(void)
37885 int err;
37886 char *cptr = config;
37887 struct console *cons;
37888+ int is_console = 0;
37889
37890 err = kgdboc_option_setup(config);
37891 if (err || !strlen(config) || isspace(config[0]))
37892 goto noconfig;
37893
37894 err = -ENODEV;
37895- kgdboc_io_ops.is_console = 0;
37896 kgdb_tty_driver = NULL;
37897
37898 kgdboc_use_kms = 0;
37899@@ -183,7 +186,7 @@ static int configure_kgdboc(void)
37900 int idx;
37901 if (cons->device && cons->device(cons, &idx) == p &&
37902 idx == tty_line) {
37903- kgdboc_io_ops.is_console = 1;
37904+ is_console = 1;
37905 break;
37906 }
37907 cons = cons->next;
37908@@ -193,12 +196,16 @@ static int configure_kgdboc(void)
37909 kgdb_tty_line = tty_line;
37910
37911 do_register:
37912- err = kgdb_register_io_module(&kgdboc_io_ops);
37913+ if (is_console) {
37914+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
37915+ configured = 2;
37916+ } else {
37917+ err = kgdb_register_io_module(&kgdboc_io_ops);
37918+ configured = 1;
37919+ }
37920 if (err)
37921 goto noconfig;
37922
37923- configured = 1;
37924-
37925 return 0;
37926
37927 noconfig:
37928@@ -212,7 +219,7 @@ noconfig:
37929 static int __init init_kgdboc(void)
37930 {
37931 /* Already configured? */
37932- if (configured == 1)
37933+ if (configured >= 1)
37934 return 0;
37935
37936 return configure_kgdboc();
37937@@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37938 if (config[len - 1] == '\n')
37939 config[len - 1] = '\0';
37940
37941- if (configured == 1)
37942+ if (configured >= 1)
37943 cleanup_kgdboc();
37944
37945 /* Go and configure with the new params. */
37946@@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
37947 .post_exception = kgdboc_post_exp_handler,
66a7e928 37948 };
37949
37950+static struct kgdb_io kgdboc_io_ops_console = {
37951+ .name = "kgdboc",
37952+ .read_char = kgdboc_get_char,
37953+ .write_char = kgdboc_put_char,
37954+ .pre_exception = kgdboc_pre_exp_handler,
37955+ .post_exception = kgdboc_post_exp_handler,
37956+ .is_console = 1
37957+};
37958+
37959 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37960 /* This is only available if kgdboc is a built in for early debugging */
37961 static int __init kgdboc_early_init(char *opt)
37962diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
37963index cab52f4..29fc6aa 100644
37964--- a/drivers/tty/serial/mfd.c
37965+++ b/drivers/tty/serial/mfd.c
37966@@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci_dev *pdev)
37967 }
37968
37969 /* First 3 are UART ports, and the 4th is the DMA */
37970-static const struct pci_device_id pci_ids[] __devinitdata = {
37971+static const struct pci_device_id pci_ids[] __devinitconst = {
37972 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
37973 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
37974 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
37975diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
37976index 23bc743..d425c07 100644
37977--- a/drivers/tty/serial/mrst_max3110.c
37978+++ b/drivers/tty/serial/mrst_max3110.c
37979@@ -393,6 +393,8 @@ static void max3110_con_receive(struct uart_max3110 *max)
37980 int loop = 1, num, total = 0;
37981 u8 recv_buf[512], *pbuf;
37982
37983+ pax_track_stack();
37984+
37985 pbuf = recv_buf;
37986 do {
37987 num = max3110_read_multi(max, pbuf);
37988diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37989index 1a890e2..1d8139c 100644
37990--- a/drivers/tty/tty_io.c
37991+++ b/drivers/tty/tty_io.c
37992@@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37993
37994 void tty_default_fops(struct file_operations *fops)
37995 {
37996- *fops = tty_fops;
37997+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37998 }
37999
38000 /*
38001diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38002index a76c808..ecbc743 100644
38003--- a/drivers/tty/tty_ldisc.c
38004+++ b/drivers/tty/tty_ldisc.c
38005@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38006 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38007 struct tty_ldisc_ops *ldo = ld->ops;
38008
38009- ldo->refcount--;
38010+ atomic_dec(&ldo->refcount);
38011 module_put(ldo->owner);
38012 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38013
38014@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38015 spin_lock_irqsave(&tty_ldisc_lock, flags);
38016 tty_ldiscs[disc] = new_ldisc;
38017 new_ldisc->num = disc;
38018- new_ldisc->refcount = 0;
38019+ atomic_set(&new_ldisc->refcount, 0);
38020 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38021
38022 return ret;
38023@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
38024 return -EINVAL;
38025
38026 spin_lock_irqsave(&tty_ldisc_lock, flags);
38027- if (tty_ldiscs[disc]->refcount)
38028+ if (atomic_read(&tty_ldiscs[disc]->refcount))
38029 ret = -EBUSY;
38030 else
38031 tty_ldiscs[disc] = NULL;
38032@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38033 if (ldops) {
38034 ret = ERR_PTR(-EAGAIN);
38035 if (try_module_get(ldops->owner)) {
38036- ldops->refcount++;
38037+ atomic_inc(&ldops->refcount);
38038 ret = ldops;
38039 }
38040 }
fe2de317 38041@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
bc901d79
MT
38042 unsigned long flags;
38043
38044 spin_lock_irqsave(&tty_ldisc_lock, flags);
38045- ldops->refcount--;
38046+ atomic_dec(&ldops->refcount);
38047 module_put(ldops->owner);
38048 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38049 }
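[Editorial note - not part of the patch] The tty_ldisc.c hunks above convert the ops refcount from a plain int to an atomic counter, so every get/put is a single indivisible update rather than a bare read-modify-write on an integer. A minimal userspace sketch of the same get/put reference-counting shape, using C11 atomics as a stand-in for the kernel's atomic_t, is shown below; the names are illustrative only.

/*
 * Editorial sketch: an atomic get/put reference count in userspace C11.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_ops {
	atomic_int refcount;
};

static void demo_get(struct demo_ops *o)
{
	atomic_fetch_add(&o->refcount, 1);	/* was: o->refcount++ */
}

static void demo_put(struct demo_ops *o)
{
	atomic_fetch_sub(&o->refcount, 1);	/* was: o->refcount-- */
}

static int demo_busy(const struct demo_ops *o)
{
	return atomic_load(&o->refcount) != 0;
}

int main(void)
{
	struct demo_ops ops;

	atomic_init(&ops.refcount, 0);
	demo_get(&ops);
	printf("busy=%d\n", demo_busy(&ops));	/* prints 1 */
	demo_put(&ops);
	printf("busy=%d\n", demo_busy(&ops));	/* prints 0 */
	return 0;
}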
fe2de317
MT
38050diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38051index 3761ccf..2c613b3 100644
38052--- a/drivers/tty/vt/keyboard.c
38053+++ b/drivers/tty/vt/keyboard.c
38054@@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
66a7e928 38055 kbd->kbdmode == VC_OFF) &&
bc901d79
MT
38056 value != KVAL(K_SAK))
38057 return; /* SAK is allowed even in raw mode */
38058+
38059+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38060+ {
38061+ void *func = fn_handler[value];
38062+ if (func == fn_show_state || func == fn_show_ptregs ||
38063+ func == fn_show_mem)
38064+ return;
38065+ }
38066+#endif
38067+
38068 fn_handler[value](vc);
38069 }
38070
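[Editorial note - not part of the patch] The keyboard.c hunk above checks the function pointer fetched from fn_handler[] against a short deny list of handlers that dump kernel state to the console (fn_show_state, fn_show_ptregs, fn_show_mem) and silently drops the keypress when the GRKERNSEC proc-hardening options are enabled. A compact userspace sketch of that dispatch-with-deny-list shape follows, with purely illustrative names.

/*
 * Editorial sketch: vet a function pointer looked up from a table
 * against a small deny list before invoking it.
 */
#include <stdio.h>

static void show_state(void)  { puts("state");  }
static void show_mem(void)    { puts("memory"); }
static void beep(void)        { puts("beep");   }

static void (*const handlers[])(void) = { show_state, show_mem, beep };

static int restricted;	/* stand-in for a CONFIG_GRKERNSEC_* style switch */

static void dispatch(unsigned int idx)
{
	void (*fn)(void);

	if (idx >= sizeof(handlers) / sizeof(handlers[0]))
		return;
	fn = handlers[idx];
	if (restricted && (fn == show_state || fn == show_mem))
		return;		/* state-dumping handlers are suppressed */
	fn();
}

int main(void)
{
	restricted = 1;
	dispatch(0);	/* suppressed */
	dispatch(2);	/* prints "beep" */
	return 0;
}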
fe2de317
MT
38071diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
38072index b3915b7..e716839 100644
38073--- a/drivers/tty/vt/vt.c
38074+++ b/drivers/tty/vt/vt.c
38075@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
16454cff
MT
38076
38077 static void notify_write(struct vc_data *vc, unsigned int unicode)
38078 {
38079- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
38080+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
38081 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
38082 }
38083
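[Editorial note - not part of the patch] The vt.c hunk fixes an initializer that only worked by an accident of C syntax: without the leading dot, "unicode = unicode" is merely an assignment expression whose value initializes the next positional member after .vc. The replacement names the member explicitly. A tiny demonstration of the pitfall, using a simplified stand-in struct rather than the real vt_notifier_param, is:

/*
 * Editorial sketch: a missing designator silently initializes whatever
 * member happens to come next in declaration order.
 */
#include <stdio.h>

struct param {
	int vc;
	unsigned int c;
};

int main(void)
{
	unsigned int unicode = 0x263A;
	struct param bad  = { .vc = 1, unicode = unicode };	/* works only
					because .c follows .vc in the struct */
	struct param good = { .vc = 1, .c = unicode };		/* explicit */

	printf("bad.c=%#x good.c=%#x\n", bad.c, good.c);
	return 0;
}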
fe2de317
MT
38084diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
38085index 5e096f4..0da1363 100644
38086--- a/drivers/tty/vt/vt_ioctl.c
38087+++ b/drivers/tty/vt/vt_ioctl.c
38088@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
bc901d79
MT
38089 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38090 return -EFAULT;
38091
38092- if (!capable(CAP_SYS_TTY_CONFIG))
38093- perm = 0;
38094-
38095 switch (cmd) {
38096 case KDGKBENT:
38097 key_map = key_maps[s];
fe2de317 38098@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
bc901d79
MT
38099 val = (i ? K_HOLE : K_NOSUCHMAP);
38100 return put_user(val, &user_kbe->kb_value);
38101 case KDSKBENT:
38102+ if (!capable(CAP_SYS_TTY_CONFIG))
38103+ perm = 0;
38104+
38105 if (!perm)
38106 return -EPERM;
38107 if (!i && v == K_NOSUCHMAP) {
fe2de317 38108@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
bc901d79
MT
38109 int i, j, k;
38110 int ret;
38111
38112- if (!capable(CAP_SYS_TTY_CONFIG))
38113- perm = 0;
38114-
38115 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38116 if (!kbs) {
38117 ret = -ENOMEM;
fe2de317 38118@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
bc901d79
MT
38119 kfree(kbs);
38120 return ((p && *p) ? -EOVERFLOW : 0);
38121 case KDSKBSENT:
38122+ if (!capable(CAP_SYS_TTY_CONFIG))
38123+ perm = 0;
38124+
38125 if (!perm) {
38126 ret = -EPERM;
38127 goto reterr;
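[Editorial note - not part of the patch] The vt_ioctl.c hunks move the CAP_SYS_TTY_CONFIG check from the top of the helpers down into the KDSKBENT/KDSKBSENT branches, so querying a key map entry no longer demands the capability while modifying one still does. The shape of that change, sketched in userspace with a mocked privilege check and illustrative names, is:

/*
 * Editorial sketch: require privilege only on the mutating command.
 */
#include <stdio.h>

enum { CMD_GET, CMD_SET };

static int caller_privileged;		/* stand-in for capable() */
static int stored_value = 42;

static int do_demo_ioctl(int cmd, int *val)
{
	switch (cmd) {
	case CMD_GET:			/* reads need no privilege */
		*val = stored_value;
		return 0;
	case CMD_SET:			/* writes are still gated */
		if (!caller_privileged)
			return -1;	/* -EPERM in the kernel */
		stored_value = *val;
		return 0;
	}
	return -1;
}

int main(void)
{
	int v = 7;

	printf("get: %d v=%d\n", do_demo_ioctl(CMD_GET, &v), v);	/* 0, 42 */
	printf("set: %d\n", do_demo_ioctl(CMD_SET, &v));		/* -1    */
	return 0;
}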
fe2de317
MT
38128diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38129index d2efe82..9440ab6 100644
38130--- a/drivers/uio/uio.c
38131+++ b/drivers/uio/uio.c
c52201e0
MT
38132@@ -25,6 +25,7 @@
38133 #include <linux/kobject.h>
38134 #include <linux/cdev.h>
38135 #include <linux/uio_driver.h>
38136+#include <asm/local.h>
38137
38138 #define UIO_MAX_DEVICES (1U << MINORBITS)
38139
8308f9c9
MT
38140@@ -32,10 +33,10 @@ struct uio_device {
38141 struct module *owner;
38142 struct device *dev;
38143 int minor;
38144- atomic_t event;
38145+ atomic_unchecked_t event;
c52201e0
MT
38146 struct fasync_struct *async_queue;
38147 wait_queue_head_t wait;
38148- int vma_count;
38149+ local_t vma_count;
38150 struct uio_info *info;
38151 struct kobject *map_dir;
38152 struct kobject *portio_dir;
fe2de317 38153@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
8308f9c9
MT
38154 struct device_attribute *attr, char *buf)
38155 {
38156 struct uio_device *idev = dev_get_drvdata(dev);
38157- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38158+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38159 }
38160
38161 static struct device_attribute uio_class_attributes[] = {
fe2de317 38162@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
8308f9c9
MT
38163 {
38164 struct uio_device *idev = info->uio_dev;
38165
38166- atomic_inc(&idev->event);
38167+ atomic_inc_unchecked(&idev->event);
38168 wake_up_interruptible(&idev->wait);
38169 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38170 }
fe2de317 38171@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
8308f9c9
MT
38172 }
38173
38174 listener->dev = idev;
38175- listener->event_count = atomic_read(&idev->event);
38176+ listener->event_count = atomic_read_unchecked(&idev->event);
38177 filep->private_data = listener;
38178
38179 if (idev->info->open) {
fe2de317 38180@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
8308f9c9
MT
38181 return -EIO;
38182
38183 poll_wait(filep, &idev->wait, wait);
38184- if (listener->event_count != atomic_read(&idev->event))
38185+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38186 return POLLIN | POLLRDNORM;
38187 return 0;
38188 }
fe2de317 38189@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
8308f9c9
MT
38190 do {
38191 set_current_state(TASK_INTERRUPTIBLE);
38192
38193- event_count = atomic_read(&idev->event);
38194+ event_count = atomic_read_unchecked(&idev->event);
38195 if (event_count != listener->event_count) {
38196 if (copy_to_user(buf, &event_count, count))
38197 retval = -EFAULT;
fe2de317 38198@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
c52201e0
MT
38199 static void uio_vma_open(struct vm_area_struct *vma)
38200 {
38201 struct uio_device *idev = vma->vm_private_data;
38202- idev->vma_count++;
38203+ local_inc(&idev->vma_count);
38204 }
38205
38206 static void uio_vma_close(struct vm_area_struct *vma)
38207 {
38208 struct uio_device *idev = vma->vm_private_data;
38209- idev->vma_count--;
38210+ local_dec(&idev->vma_count);
38211 }
38212
38213 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
fe2de317 38214@@ -823,7 +824,7 @@ int __uio_register_device(struct module *owner,
8308f9c9
MT
38215 idev->owner = owner;
38216 idev->info = info;
38217 init_waitqueue_head(&idev->wait);
38218- atomic_set(&idev->event, 0);
38219+ atomic_set_unchecked(&idev->event, 0);
38220
38221 ret = uio_get_minor(idev);
38222 if (ret)
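[Editorial note - not part of the patch] The uio.c hunks split the counters by intent: idev->event, which userspace only ever compares against an earlier snapshot, becomes atomic_unchecked_t (apparently exempting it from PaX's reference-counter overflow checking, since wraparound of a pure event counter is harmless), while the mmap counter becomes a local_t. A userspace sketch of a wrap-tolerant "has anything changed?" counter follows; the reasoning about why overflow is benign here is an editorial interpretation, not wording from the patch.

/*
 * Editorial sketch: an event counter that is only ever compared for
 * inequality against a previous snapshot, so overflow is benign.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint event_count;

static void event_notify(void)
{
	atomic_fetch_add(&event_count, 1);
}

static unsigned int event_snapshot(void)
{
	return atomic_load(&event_count);
}

int main(void)
{
	unsigned int seen = event_snapshot();

	event_notify();
	/* Inequality survives wraparound, unlike "new > old". */
	printf("changed=%d\n", event_snapshot() != seen);	/* prints 1 */
	return 0;
}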
fe2de317
MT
38223diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38224index a845f8b..4f54072 100644
38225--- a/drivers/usb/atm/cxacru.c
38226+++ b/drivers/usb/atm/cxacru.c
38227@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
6892158b
MT
38228 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38229 if (ret < 2)
38230 return -EINVAL;
38231- if (index < 0 || index > 0x7f)
38232+ if (index > 0x7f)
38233 return -EINVAL;
38234 pos += tmp;
38235
fe2de317
MT
38236diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38237index d3448ca..d2864ca 100644
38238--- a/drivers/usb/atm/usbatm.c
38239+++ b/drivers/usb/atm/usbatm.c
38240@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38241 if (printk_ratelimit())
38242 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38243 __func__, vpi, vci);
38244- atomic_inc(&vcc->stats->rx_err);
38245+ atomic_inc_unchecked(&vcc->stats->rx_err);
38246 return;
38247 }
38248
fe2de317 38249@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38250 if (length > ATM_MAX_AAL5_PDU) {
38251 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38252 __func__, length, vcc);
38253- atomic_inc(&vcc->stats->rx_err);
38254+ atomic_inc_unchecked(&vcc->stats->rx_err);
38255 goto out;
38256 }
38257
fe2de317 38258@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38259 if (sarb->len < pdu_length) {
38260 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38261 __func__, pdu_length, sarb->len, vcc);
38262- atomic_inc(&vcc->stats->rx_err);
38263+ atomic_inc_unchecked(&vcc->stats->rx_err);
38264 goto out;
38265 }
38266
38267 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38268 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38269 __func__, vcc);
38270- atomic_inc(&vcc->stats->rx_err);
38271+ atomic_inc_unchecked(&vcc->stats->rx_err);
38272 goto out;
38273 }
38274
fe2de317 38275@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38276 if (printk_ratelimit())
38277 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38278 __func__, length);
38279- atomic_inc(&vcc->stats->rx_drop);
38280+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38281 goto out;
38282 }
38283
fe2de317 38284@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38285
38286 vcc->push(vcc, skb);
38287
38288- atomic_inc(&vcc->stats->rx);
38289+ atomic_inc_unchecked(&vcc->stats->rx);
38290 out:
38291 skb_trim(sarb, 0);
38292 }
fe2de317 38293@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
58c5fc13
MT
38294 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38295
38296 usbatm_pop(vcc, skb);
38297- atomic_inc(&vcc->stats->tx);
38298+ atomic_inc_unchecked(&vcc->stats->tx);
38299
38300 skb = skb_dequeue(&instance->sndqueue);
38301 }
fe2de317 38302@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
58c5fc13
MT
38303 if (!left--)
38304 return sprintf(page,
38305 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38306- atomic_read(&atm_dev->stats.aal5.tx),
38307- atomic_read(&atm_dev->stats.aal5.tx_err),
38308- atomic_read(&atm_dev->stats.aal5.rx),
38309- atomic_read(&atm_dev->stats.aal5.rx_err),
38310- atomic_read(&atm_dev->stats.aal5.rx_drop));
38311+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38312+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38313+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38314+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38315+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38316
38317 if (!left--) {
38318 if (instance->disconnected)
fe2de317
MT
38319diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38320index 0149c09..f108812 100644
38321--- a/drivers/usb/core/devices.c
38322+++ b/drivers/usb/core/devices.c
15a11c5b 38323@@ -126,7 +126,7 @@ static const char format_endpt[] =
8308f9c9
MT
38324 * time it gets called.
38325 */
38326 static struct device_connect_event {
38327- atomic_t count;
38328+ atomic_unchecked_t count;
38329 wait_queue_head_t wait;
38330 } device_event = {
38331 .count = ATOMIC_INIT(1),
fe2de317 38332@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
8308f9c9
MT
38333
38334 void usbfs_conn_disc_event(void)
38335 {
38336- atomic_add(2, &device_event.count);
38337+ atomic_add_unchecked(2, &device_event.count);
38338 wake_up(&device_event.wait);
38339 }
38340
fe2de317 38341@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
8308f9c9
MT
38342
38343 poll_wait(file, &device_event.wait, wait);
38344
38345- event_count = atomic_read(&device_event.count);
38346+ event_count = atomic_read_unchecked(&device_event.count);
38347 if (file->f_version != event_count) {
38348 file->f_version = event_count;
38349 return POLLIN | POLLRDNORM;
fe2de317
MT
38350diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
38351index 0b5ec23..0da3d76 100644
38352--- a/drivers/usb/core/message.c
38353+++ b/drivers/usb/core/message.c
38354@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
ae4e228f 38355 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
58c5fc13
MT
38356 if (buf) {
38357 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38358- if (len > 0) {
ae4e228f 38359- smallbuf = kmalloc(++len, GFP_NOIO);
58c5fc13 38360+ if (len++ > 0) {
ae4e228f 38361+ smallbuf = kmalloc(len, GFP_NOIO);
58c5fc13
MT
38362 if (!smallbuf)
38363 return buf;
38364 memcpy(smallbuf, buf, len);
fe2de317
MT
38365diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38366index 1fc8f12..20647c1 100644
38367--- a/drivers/usb/early/ehci-dbgp.c
38368+++ b/drivers/usb/early/ehci-dbgp.c
38369@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
6892158b
MT
38370
38371 #ifdef CONFIG_KGDB
15a11c5b
MT
38372 static struct kgdb_io kgdbdbgp_io_ops;
38373-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38374+static struct kgdb_io kgdbdbgp_io_ops_console;
38375+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
6892158b 38376 #else
16454cff 38377 #define dbgp_kgdb_mode (0)
15a11c5b 38378 #endif
fe2de317 38379@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
16454cff 38380 .write_char = kgdbdbgp_write_char,
66a7e928
MT
38381 };
38382
15a11c5b
MT
38383+static struct kgdb_io kgdbdbgp_io_ops_console = {
38384+ .name = "kgdbdbgp",
38385+ .read_char = kgdbdbgp_read_char,
38386+ .write_char = kgdbdbgp_write_char,
38387+ .is_console = 1
38388+};
38389+
38390 static int kgdbdbgp_wait_time;
66a7e928 38391
15a11c5b 38392 static int __init kgdbdbgp_parse_config(char *str)
fe2de317 38393@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
15a11c5b
MT
38394 ptr++;
38395 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38396 }
38397- kgdb_register_io_module(&kgdbdbgp_io_ops);
38398- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38399+ if (early_dbgp_console.index != -1)
38400+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38401+ else
38402+ kgdb_register_io_module(&kgdbdbgp_io_ops);
66a7e928 38403
66a7e928
MT
38404 return 0;
38405 }
fe2de317
MT
38406diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
38407index d718033..6075579 100644
38408--- a/drivers/usb/host/xhci-mem.c
38409+++ b/drivers/usb/host/xhci-mem.c
38410@@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
66a7e928
MT
38411 unsigned int num_tests;
38412 int i, ret;
38413
38414+ pax_track_stack();
38415+
38416 num_tests = ARRAY_SIZE(simple_test_vector);
38417 for (i = 0; i < num_tests; i++) {
38418 ret = xhci_test_trb_in_td(xhci,
fe2de317
MT
38419diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38420index d6bea3e..60b250e 100644
38421--- a/drivers/usb/wusbcore/wa-hc.h
38422+++ b/drivers/usb/wusbcore/wa-hc.h
8308f9c9
MT
38423@@ -192,7 +192,7 @@ struct wahc {
38424 struct list_head xfer_delayed_list;
38425 spinlock_t xfer_list_lock;
38426 struct work_struct xfer_work;
38427- atomic_t xfer_id_count;
38428+ atomic_unchecked_t xfer_id_count;
38429 };
38430
38431
fe2de317 38432@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
8308f9c9
MT
38433 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38434 spin_lock_init(&wa->xfer_list_lock);
38435 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38436- atomic_set(&wa->xfer_id_count, 1);
38437+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38438 }
38439
38440 /**
fe2de317
MT
38441diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38442index 4193345..49ae93d 100644
38443--- a/drivers/usb/wusbcore/wa-xfer.c
38444+++ b/drivers/usb/wusbcore/wa-xfer.c
6e9df6a3 38445@@ -295,7 +295,7 @@ out:
8308f9c9
MT
38446 */
38447 static void wa_xfer_id_init(struct wa_xfer *xfer)
38448 {
38449- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38450+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38451 }
38452
38453 /*
fe2de317
MT
38454diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38455index c14c42b..f955cc2 100644
38456--- a/drivers/vhost/vhost.c
38457+++ b/drivers/vhost/vhost.c
38458@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
6e9df6a3 38459 return 0;
57199397
MT
38460 }
38461
38462-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38463+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38464 {
38465 struct file *eventfp, *filep = NULL,
38466 *pollstart = NULL, *pollstop = NULL;
fe2de317
MT
38467diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38468index b0b2ac3..89a4399 100644
38469--- a/drivers/video/aty/aty128fb.c
38470+++ b/drivers/video/aty/aty128fb.c
6e9df6a3
MT
38471@@ -148,7 +148,7 @@ enum {
38472 };
38473
38474 /* Must match above enum */
38475-static const char *r128_family[] __devinitdata = {
38476+static const char *r128_family[] __devinitconst = {
38477 "AGP",
38478 "PCI",
38479 "PRO AGP",
fe2de317
MT
38480diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38481index 5c3960d..15cf8fc 100644
38482--- a/drivers/video/fbcmap.c
38483+++ b/drivers/video/fbcmap.c
38484@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
df50ba0c
MT
38485 rc = -ENODEV;
38486 goto out;
38487 }
38488- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38489- !info->fbops->fb_setcmap)) {
38490+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38491 rc = -EINVAL;
38492 goto out1;
38493 }
fe2de317
MT
38494diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38495index ad93629..ca6a218 100644
38496--- a/drivers/video/fbmem.c
38497+++ b/drivers/video/fbmem.c
38498@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
58c5fc13
MT
38499 image->dx += image->width + 8;
38500 }
38501 } else if (rotate == FB_ROTATE_UD) {
38502- for (x = 0; x < num && image->dx >= 0; x++) {
38503+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38504 info->fbops->fb_imageblit(info, image);
38505 image->dx -= image->width + 8;
38506 }
fe2de317 38507@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
58c5fc13
MT
38508 image->dy += image->height + 8;
38509 }
38510 } else if (rotate == FB_ROTATE_CCW) {
38511- for (x = 0; x < num && image->dy >= 0; x++) {
38512+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38513 info->fbops->fb_imageblit(info, image);
38514 image->dy -= image->height + 8;
38515 }
fe2de317 38516@@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
66a7e928
MT
38517 int flags = info->flags;
38518 int ret = 0;
38519
38520+ pax_track_stack();
38521+
38522 if (var->activate & FB_ACTIVATE_INV_MODE) {
38523 struct fb_videomode mode1, mode2;
38524
fe2de317 38525@@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
66a7e928
MT
38526 void __user *argp = (void __user *)arg;
38527 long ret = 0;
38528
38529+ pax_track_stack();
38530+
38531 switch (cmd) {
38532 case FBIOGET_VSCREENINFO:
38533 if (!lock_fb_info(info))
fe2de317 38534@@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
58c5fc13
MT
38535 return -EFAULT;
38536 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38537 return -EINVAL;
38538- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38539+ if (con2fb.framebuffer >= FB_MAX)
38540 return -EINVAL;
38541 if (!registered_fb[con2fb.framebuffer])
38542 request_module("fb%d", con2fb.framebuffer);
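[Editorial note - not part of the patch] The fbmem.c loop hunks above cast image->dx and image->dy to a signed type before testing ">= 0"; the fields are unsigned 32-bit values, so the original comparison is always true and can never terminate the loop on its own. A small standalone demonstration of the pitfall and of the cast-based fix:

/*
 * Editorial sketch: "x >= 0" is always true for unsigned x; casting to a
 * signed type makes the loop notice the wraparound.
 */
#include <stdio.h>

int main(void)
{
	unsigned int dx = 5;
	int steps = 0;

	/* Without the cast, "dx >= 0" is always true and only the steps
	 * guard would end the loop. */
	while ((int)dx >= 0 && steps < 100) {
		dx -= 10;	/* mimics image->dx -= image->width + 8 */
		steps++;
	}
	printf("steps=%d dx=%#x\n", steps, dx);	/* steps=1 dx=0xfffffffb */
	return 0;
}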
fe2de317
MT
38543diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38544index 5a5d092..265c5ed 100644
38545--- a/drivers/video/geode/gx1fb_core.c
38546+++ b/drivers/video/geode/gx1fb_core.c
6e9df6a3
MT
38547@@ -29,7 +29,7 @@ static int crt_option = 1;
38548 static char panel_option[32] = "";
38549
38550 /* Modes relevant to the GX1 (taken from modedb.c) */
38551-static const struct fb_videomode __devinitdata gx1_modedb[] = {
38552+static const struct fb_videomode __devinitconst gx1_modedb[] = {
38553 /* 640x480-60 VESA */
38554 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38555 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
fe2de317
MT
38556diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38557index 896e53d..4d87d0b 100644
38558--- a/drivers/video/gxt4500.c
38559+++ b/drivers/video/gxt4500.c
6e9df6a3
MT
38560@@ -156,7 +156,7 @@ struct gxt4500_par {
38561 static char *mode_option;
38562
38563 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38564-static const struct fb_videomode defaultmode __devinitdata = {
38565+static const struct fb_videomode defaultmode __devinitconst = {
38566 .refresh = 60,
38567 .xres = 1280,
38568 .yres = 1024,
fe2de317 38569@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
6e9df6a3
MT
38570 return 0;
38571 }
38572
38573-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38574+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38575 .id = "IBM GXT4500P",
38576 .type = FB_TYPE_PACKED_PIXELS,
38577 .visual = FB_VISUAL_PSEUDOCOLOR,
fe2de317
MT
38578diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38579index 7672d2e..b56437f 100644
38580--- a/drivers/video/i810/i810_accel.c
38581+++ b/drivers/video/i810/i810_accel.c
38582@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
58c5fc13
MT
38583 }
38584 }
38585 printk("ringbuffer lockup!!!\n");
38586+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38587 i810_report_error(mmio);
38588 par->dev_flags |= LOCKUP;
38589 info->pixmap.scan_align = 1;
fe2de317
MT
38590diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38591index 318f6fb..9a389c1 100644
38592--- a/drivers/video/i810/i810_main.c
38593+++ b/drivers/video/i810/i810_main.c
38594@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
6e9df6a3
MT
38595 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38596
38597 /* PCI */
38598-static const char *i810_pci_list[] __devinitdata = {
38599+static const char *i810_pci_list[] __devinitconst = {
38600 "Intel(R) 810 Framebuffer Device" ,
38601 "Intel(R) 810-DC100 Framebuffer Device" ,
38602 "Intel(R) 810E Framebuffer Device" ,
fe2de317
MT
38603diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38604index de36693..3c63fc2 100644
38605--- a/drivers/video/jz4740_fb.c
38606+++ b/drivers/video/jz4740_fb.c
6e9df6a3
MT
38607@@ -136,7 +136,7 @@ struct jzfb {
38608 uint32_t pseudo_palette[16];
38609 };
38610
38611-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38612+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38613 .id = "JZ4740 FB",
38614 .type = FB_TYPE_PACKED_PIXELS,
38615 .visual = FB_VISUAL_TRUECOLOR,
fe2de317
MT
38616diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38617index 3c14e43..eafa544 100644
38618--- a/drivers/video/logo/logo_linux_clut224.ppm
38619+++ b/drivers/video/logo/logo_linux_clut224.ppm
15a11c5b
MT
38620@@ -1,1604 +1,1123 @@
38621 P3
38622-# Standard 224-color Linux logo
38623 80 80
38624 255
 [ raw PPM pixel rows omitted for readability: the remainder of this hunk deletes,
   row by row, the numeric pixel values of the stock 80x80, 224-color Linux boot
   logo and, per the hunk header (-1,1604 +1,1123), substitutes the rows of a
   replacement logo image; the bulk numeric data is not reproduced here ]
39633-253 253 253 253 253 253 231 231 231 242 242 242
39634-253 253 253 253 253 253 253 253 253 253 253 253
39635-253 253 253 253 253 253 253 253 253 253 253 253
39636-253 253 253 253 253 253 253 253 253 253 253 253
39637-253 253 253 253 253 253 253 253 253 253 253 253
39638-253 253 253 253 253 253 234 234 234 10 10 10
39639- 2 2 6 2 2 6 22 22 22 14 14 14
39640- 2 2 6 2 2 6 2 2 6 2 2 6
39641- 2 2 6 66 66 66 62 62 62 22 22 22
39642- 6 6 6 0 0 0 0 0 0 0 0 0
39643- 0 0 0 0 0 0 0 0 0 0 0 0
39644- 0 0 0 0 0 0 0 0 0 0 0 0
39645- 0 0 0 0 0 0 0 0 0 0 0 0
39646- 0 0 0 0 0 0 0 0 0 0 0 0
39647- 0 0 0 0 0 0 0 0 0 0 0 0
39648- 0 0 0 0 0 0 6 6 6 18 18 18
39649- 50 50 50 74 74 74 2 2 6 2 2 6
39650- 14 14 14 70 70 70 34 34 34 62 62 62
39651-250 250 250 253 253 253 253 253 253 253 253 253
39652-253 253 253 253 253 253 253 253 253 253 253 253
39653-253 253 253 253 253 253 231 231 231 246 246 246
39654-253 253 253 253 253 253 253 253 253 253 253 253
39655-253 253 253 253 253 253 253 253 253 253 253 253
39656-253 253 253 253 253 253 253 253 253 253 253 253
39657-253 253 253 253 253 253 253 253 253 253 253 253
39658-253 253 253 253 253 253 234 234 234 14 14 14
39659- 2 2 6 2 2 6 30 30 30 2 2 6
39660- 2 2 6 2 2 6 2 2 6 2 2 6
39661- 2 2 6 66 66 66 62 62 62 22 22 22
39662- 6 6 6 0 0 0 0 0 0 0 0 0
39663- 0 0 0 0 0 0 0 0 0 0 0 0
39664- 0 0 0 0 0 0 0 0 0 0 0 0
39665- 0 0 0 0 0 0 0 0 0 0 0 0
39666- 0 0 0 0 0 0 0 0 0 0 0 0
39667- 0 0 0 0 0 0 0 0 0 0 0 0
39668- 0 0 0 0 0 0 6 6 6 18 18 18
39669- 54 54 54 62 62 62 2 2 6 2 2 6
39670- 2 2 6 30 30 30 46 46 46 70 70 70
39671-250 250 250 253 253 253 253 253 253 253 253 253
39672-253 253 253 253 253 253 253 253 253 253 253 253
39673-253 253 253 253 253 253 231 231 231 246 246 246
39674-253 253 253 253 253 253 253 253 253 253 253 253
39675-253 253 253 253 253 253 253 253 253 253 253 253
39676-253 253 253 253 253 253 253 253 253 253 253 253
39677-253 253 253 253 253 253 253 253 253 253 253 253
39678-253 253 253 253 253 253 226 226 226 10 10 10
39679- 2 2 6 6 6 6 30 30 30 2 2 6
39680- 2 2 6 2 2 6 2 2 6 2 2 6
39681- 2 2 6 66 66 66 58 58 58 22 22 22
39682- 6 6 6 0 0 0 0 0 0 0 0 0
39683- 0 0 0 0 0 0 0 0 0 0 0 0
39684- 0 0 0 0 0 0 0 0 0 0 0 0
39685- 0 0 0 0 0 0 0 0 0 0 0 0
39686- 0 0 0 0 0 0 0 0 0 0 0 0
39687- 0 0 0 0 0 0 0 0 0 0 0 0
39688- 0 0 0 0 0 0 6 6 6 22 22 22
39689- 58 58 58 62 62 62 2 2 6 2 2 6
39690- 2 2 6 2 2 6 30 30 30 78 78 78
39691-250 250 250 253 253 253 253 253 253 253 253 253
39692-253 253 253 253 253 253 253 253 253 253 253 253
39693-253 253 253 253 253 253 231 231 231 246 246 246
39694-253 253 253 253 253 253 253 253 253 253 253 253
39695-253 253 253 253 253 253 253 253 253 253 253 253
39696-253 253 253 253 253 253 253 253 253 253 253 253
39697-253 253 253 253 253 253 253 253 253 253 253 253
39698-253 253 253 253 253 253 206 206 206 2 2 6
39699- 22 22 22 34 34 34 18 14 6 22 22 22
39700- 26 26 26 18 18 18 6 6 6 2 2 6
39701- 2 2 6 82 82 82 54 54 54 18 18 18
39702- 6 6 6 0 0 0 0 0 0 0 0 0
39703- 0 0 0 0 0 0 0 0 0 0 0 0
39704- 0 0 0 0 0 0 0 0 0 0 0 0
39705- 0 0 0 0 0 0 0 0 0 0 0 0
39706- 0 0 0 0 0 0 0 0 0 0 0 0
39707- 0 0 0 0 0 0 0 0 0 0 0 0
39708- 0 0 0 0 0 0 6 6 6 26 26 26
39709- 62 62 62 106 106 106 74 54 14 185 133 11
39710-210 162 10 121 92 8 6 6 6 62 62 62
39711-238 238 238 253 253 253 253 253 253 253 253 253
39712-253 253 253 253 253 253 253 253 253 253 253 253
39713-253 253 253 253 253 253 231 231 231 246 246 246
39714-253 253 253 253 253 253 253 253 253 253 253 253
39715-253 253 253 253 253 253 253 253 253 253 253 253
39716-253 253 253 253 253 253 253 253 253 253 253 253
39717-253 253 253 253 253 253 253 253 253 253 253 253
39718-253 253 253 253 253 253 158 158 158 18 18 18
39719- 14 14 14 2 2 6 2 2 6 2 2 6
39720- 6 6 6 18 18 18 66 66 66 38 38 38
39721- 6 6 6 94 94 94 50 50 50 18 18 18
39722- 6 6 6 0 0 0 0 0 0 0 0 0
39723- 0 0 0 0 0 0 0 0 0 0 0 0
39724- 0 0 0 0 0 0 0 0 0 0 0 0
39725- 0 0 0 0 0 0 0 0 0 0 0 0
39726- 0 0 0 0 0 0 0 0 0 0 0 0
39727- 0 0 0 0 0 0 0 0 0 6 6 6
39728- 10 10 10 10 10 10 18 18 18 38 38 38
39729- 78 78 78 142 134 106 216 158 10 242 186 14
39730-246 190 14 246 190 14 156 118 10 10 10 10
39731- 90 90 90 238 238 238 253 253 253 253 253 253
39732-253 253 253 253 253 253 253 253 253 253 253 253
39733-253 253 253 253 253 253 231 231 231 250 250 250
39734-253 253 253 253 253 253 253 253 253 253 253 253
39735-253 253 253 253 253 253 253 253 253 253 253 253
39736-253 253 253 253 253 253 253 253 253 253 253 253
39737-253 253 253 253 253 253 253 253 253 246 230 190
39738-238 204 91 238 204 91 181 142 44 37 26 9
39739- 2 2 6 2 2 6 2 2 6 2 2 6
39740- 2 2 6 2 2 6 38 38 38 46 46 46
39741- 26 26 26 106 106 106 54 54 54 18 18 18
39742- 6 6 6 0 0 0 0 0 0 0 0 0
39743- 0 0 0 0 0 0 0 0 0 0 0 0
39744- 0 0 0 0 0 0 0 0 0 0 0 0
39745- 0 0 0 0 0 0 0 0 0 0 0 0
39746- 0 0 0 0 0 0 0 0 0 0 0 0
39747- 0 0 0 6 6 6 14 14 14 22 22 22
39748- 30 30 30 38 38 38 50 50 50 70 70 70
39749-106 106 106 190 142 34 226 170 11 242 186 14
39750-246 190 14 246 190 14 246 190 14 154 114 10
39751- 6 6 6 74 74 74 226 226 226 253 253 253
39752-253 253 253 253 253 253 253 253 253 253 253 253
39753-253 253 253 253 253 253 231 231 231 250 250 250
39754-253 253 253 253 253 253 253 253 253 253 253 253
39755-253 253 253 253 253 253 253 253 253 253 253 253
39756-253 253 253 253 253 253 253 253 253 253 253 253
39757-253 253 253 253 253 253 253 253 253 228 184 62
39758-241 196 14 241 208 19 232 195 16 38 30 10
39759- 2 2 6 2 2 6 2 2 6 2 2 6
39760- 2 2 6 6 6 6 30 30 30 26 26 26
39761-203 166 17 154 142 90 66 66 66 26 26 26
39762- 6 6 6 0 0 0 0 0 0 0 0 0
39763- 0 0 0 0 0 0 0 0 0 0 0 0
39764- 0 0 0 0 0 0 0 0 0 0 0 0
39765- 0 0 0 0 0 0 0 0 0 0 0 0
39766- 0 0 0 0 0 0 0 0 0 0 0 0
39767- 6 6 6 18 18 18 38 38 38 58 58 58
39768- 78 78 78 86 86 86 101 101 101 123 123 123
39769-175 146 61 210 150 10 234 174 13 246 186 14
39770-246 190 14 246 190 14 246 190 14 238 190 10
39771-102 78 10 2 2 6 46 46 46 198 198 198
39772-253 253 253 253 253 253 253 253 253 253 253 253
39773-253 253 253 253 253 253 234 234 234 242 242 242
39774-253 253 253 253 253 253 253 253 253 253 253 253
39775-253 253 253 253 253 253 253 253 253 253 253 253
39776-253 253 253 253 253 253 253 253 253 253 253 253
39777-253 253 253 253 253 253 253 253 253 224 178 62
39778-242 186 14 241 196 14 210 166 10 22 18 6
39779- 2 2 6 2 2 6 2 2 6 2 2 6
39780- 2 2 6 2 2 6 6 6 6 121 92 8
39781-238 202 15 232 195 16 82 82 82 34 34 34
39782- 10 10 10 0 0 0 0 0 0 0 0 0
39783- 0 0 0 0 0 0 0 0 0 0 0 0
39784- 0 0 0 0 0 0 0 0 0 0 0 0
39785- 0 0 0 0 0 0 0 0 0 0 0 0
39786- 0 0 0 0 0 0 0 0 0 0 0 0
39787- 14 14 14 38 38 38 70 70 70 154 122 46
39788-190 142 34 200 144 11 197 138 11 197 138 11
39789-213 154 11 226 170 11 242 186 14 246 190 14
39790-246 190 14 246 190 14 246 190 14 246 190 14
39791-225 175 15 46 32 6 2 2 6 22 22 22
39792-158 158 158 250 250 250 253 253 253 253 253 253
39793-253 253 253 253 253 253 253 253 253 253 253 253
39794-253 253 253 253 253 253 253 253 253 253 253 253
39795-253 253 253 253 253 253 253 253 253 253 253 253
39796-253 253 253 253 253 253 253 253 253 253 253 253
39797-253 253 253 250 250 250 242 242 242 224 178 62
39798-239 182 13 236 186 11 213 154 11 46 32 6
39799- 2 2 6 2 2 6 2 2 6 2 2 6
39800- 2 2 6 2 2 6 61 42 6 225 175 15
39801-238 190 10 236 186 11 112 100 78 42 42 42
39802- 14 14 14 0 0 0 0 0 0 0 0 0
39803- 0 0 0 0 0 0 0 0 0 0 0 0
39804- 0 0 0 0 0 0 0 0 0 0 0 0
39805- 0 0 0 0 0 0 0 0 0 0 0 0
39806- 0 0 0 0 0 0 0 0 0 6 6 6
39807- 22 22 22 54 54 54 154 122 46 213 154 11
39808-226 170 11 230 174 11 226 170 11 226 170 11
39809-236 178 12 242 186 14 246 190 14 246 190 14
39810-246 190 14 246 190 14 246 190 14 246 190 14
39811-241 196 14 184 144 12 10 10 10 2 2 6
39812- 6 6 6 116 116 116 242 242 242 253 253 253
39813-253 253 253 253 253 253 253 253 253 253 253 253
39814-253 253 253 253 253 253 253 253 253 253 253 253
39815-253 253 253 253 253 253 253 253 253 253 253 253
39816-253 253 253 253 253 253 253 253 253 253 253 253
39817-253 253 253 231 231 231 198 198 198 214 170 54
39818-236 178 12 236 178 12 210 150 10 137 92 6
39819- 18 14 6 2 2 6 2 2 6 2 2 6
39820- 6 6 6 70 47 6 200 144 11 236 178 12
39821-239 182 13 239 182 13 124 112 88 58 58 58
39822- 22 22 22 6 6 6 0 0 0 0 0 0
39823- 0 0 0 0 0 0 0 0 0 0 0 0
39824- 0 0 0 0 0 0 0 0 0 0 0 0
39825- 0 0 0 0 0 0 0 0 0 0 0 0
39826- 0 0 0 0 0 0 0 0 0 10 10 10
39827- 30 30 30 70 70 70 180 133 36 226 170 11
39828-239 182 13 242 186 14 242 186 14 246 186 14
39829-246 190 14 246 190 14 246 190 14 246 190 14
39830-246 190 14 246 190 14 246 190 14 246 190 14
39831-246 190 14 232 195 16 98 70 6 2 2 6
39832- 2 2 6 2 2 6 66 66 66 221 221 221
39833-253 253 253 253 253 253 253 253 253 253 253 253
39834-253 253 253 253 253 253 253 253 253 253 253 253
39835-253 253 253 253 253 253 253 253 253 253 253 253
39836-253 253 253 253 253 253 253 253 253 253 253 253
39837-253 253 253 206 206 206 198 198 198 214 166 58
39838-230 174 11 230 174 11 216 158 10 192 133 9
39839-163 110 8 116 81 8 102 78 10 116 81 8
39840-167 114 7 197 138 11 226 170 11 239 182 13
39841-242 186 14 242 186 14 162 146 94 78 78 78
39842- 34 34 34 14 14 14 6 6 6 0 0 0
39843- 0 0 0 0 0 0 0 0 0 0 0 0
39844- 0 0 0 0 0 0 0 0 0 0 0 0
39845- 0 0 0 0 0 0 0 0 0 0 0 0
39846- 0 0 0 0 0 0 0 0 0 6 6 6
39847- 30 30 30 78 78 78 190 142 34 226 170 11
39848-239 182 13 246 190 14 246 190 14 246 190 14
39849-246 190 14 246 190 14 246 190 14 246 190 14
39850-246 190 14 246 190 14 246 190 14 246 190 14
39851-246 190 14 241 196 14 203 166 17 22 18 6
39852- 2 2 6 2 2 6 2 2 6 38 38 38
39853-218 218 218 253 253 253 253 253 253 253 253 253
39854-253 253 253 253 253 253 253 253 253 253 253 253
39855-253 253 253 253 253 253 253 253 253 253 253 253
39856-253 253 253 253 253 253 253 253 253 253 253 253
39857-250 250 250 206 206 206 198 198 198 202 162 69
39858-226 170 11 236 178 12 224 166 10 210 150 10
39859-200 144 11 197 138 11 192 133 9 197 138 11
39860-210 150 10 226 170 11 242 186 14 246 190 14
39861-246 190 14 246 186 14 225 175 15 124 112 88
39862- 62 62 62 30 30 30 14 14 14 6 6 6
39863- 0 0 0 0 0 0 0 0 0 0 0 0
39864- 0 0 0 0 0 0 0 0 0 0 0 0
39865- 0 0 0 0 0 0 0 0 0 0 0 0
39866- 0 0 0 0 0 0 0 0 0 10 10 10
39867- 30 30 30 78 78 78 174 135 50 224 166 10
39868-239 182 13 246 190 14 246 190 14 246 190 14
39869-246 190 14 246 190 14 246 190 14 246 190 14
39870-246 190 14 246 190 14 246 190 14 246 190 14
39871-246 190 14 246 190 14 241 196 14 139 102 15
39872- 2 2 6 2 2 6 2 2 6 2 2 6
39873- 78 78 78 250 250 250 253 253 253 253 253 253
39874-253 253 253 253 253 253 253 253 253 253 253 253
39875-253 253 253 253 253 253 253 253 253 253 253 253
39876-253 253 253 253 253 253 253 253 253 253 253 253
39877-250 250 250 214 214 214 198 198 198 190 150 46
39878-219 162 10 236 178 12 234 174 13 224 166 10
39879-216 158 10 213 154 11 213 154 11 216 158 10
39880-226 170 11 239 182 13 246 190 14 246 190 14
39881-246 190 14 246 190 14 242 186 14 206 162 42
39882-101 101 101 58 58 58 30 30 30 14 14 14
39883- 6 6 6 0 0 0 0 0 0 0 0 0
39884- 0 0 0 0 0 0 0 0 0 0 0 0
39885- 0 0 0 0 0 0 0 0 0 0 0 0
39886- 0 0 0 0 0 0 0 0 0 10 10 10
39887- 30 30 30 74 74 74 174 135 50 216 158 10
39888-236 178 12 246 190 14 246 190 14 246 190 14
39889-246 190 14 246 190 14 246 190 14 246 190 14
39890-246 190 14 246 190 14 246 190 14 246 190 14
39891-246 190 14 246 190 14 241 196 14 226 184 13
39892- 61 42 6 2 2 6 2 2 6 2 2 6
39893- 22 22 22 238 238 238 253 253 253 253 253 253
39894-253 253 253 253 253 253 253 253 253 253 253 253
39895-253 253 253 253 253 253 253 253 253 253 253 253
39896-253 253 253 253 253 253 253 253 253 253 253 253
39897-253 253 253 226 226 226 187 187 187 180 133 36
39898-216 158 10 236 178 12 239 182 13 236 178 12
39899-230 174 11 226 170 11 226 170 11 230 174 11
39900-236 178 12 242 186 14 246 190 14 246 190 14
39901-246 190 14 246 190 14 246 186 14 239 182 13
39902-206 162 42 106 106 106 66 66 66 34 34 34
39903- 14 14 14 6 6 6 0 0 0 0 0 0
39904- 0 0 0 0 0 0 0 0 0 0 0 0
39905- 0 0 0 0 0 0 0 0 0 0 0 0
39906- 0 0 0 0 0 0 0 0 0 6 6 6
39907- 26 26 26 70 70 70 163 133 67 213 154 11
39908-236 178 12 246 190 14 246 190 14 246 190 14
39909-246 190 14 246 190 14 246 190 14 246 190 14
39910-246 190 14 246 190 14 246 190 14 246 190 14
39911-246 190 14 246 190 14 246 190 14 241 196 14
39912-190 146 13 18 14 6 2 2 6 2 2 6
39913- 46 46 46 246 246 246 253 253 253 253 253 253
39914-253 253 253 253 253 253 253 253 253 253 253 253
39915-253 253 253 253 253 253 253 253 253 253 253 253
39916-253 253 253 253 253 253 253 253 253 253 253 253
39917-253 253 253 221 221 221 86 86 86 156 107 11
39918-216 158 10 236 178 12 242 186 14 246 186 14
39919-242 186 14 239 182 13 239 182 13 242 186 14
39920-242 186 14 246 186 14 246 190 14 246 190 14
39921-246 190 14 246 190 14 246 190 14 246 190 14
39922-242 186 14 225 175 15 142 122 72 66 66 66
39923- 30 30 30 10 10 10 0 0 0 0 0 0
39924- 0 0 0 0 0 0 0 0 0 0 0 0
39925- 0 0 0 0 0 0 0 0 0 0 0 0
39926- 0 0 0 0 0 0 0 0 0 6 6 6
39927- 26 26 26 70 70 70 163 133 67 210 150 10
39928-236 178 12 246 190 14 246 190 14 246 190 14
39929-246 190 14 246 190 14 246 190 14 246 190 14
39930-246 190 14 246 190 14 246 190 14 246 190 14
39931-246 190 14 246 190 14 246 190 14 246 190 14
39932-232 195 16 121 92 8 34 34 34 106 106 106
39933-221 221 221 253 253 253 253 253 253 253 253 253
39934-253 253 253 253 253 253 253 253 253 253 253 253
39935-253 253 253 253 253 253 253 253 253 253 253 253
39936-253 253 253 253 253 253 253 253 253 253 253 253
39937-242 242 242 82 82 82 18 14 6 163 110 8
39938-216 158 10 236 178 12 242 186 14 246 190 14
39939-246 190 14 246 190 14 246 190 14 246 190 14
39940-246 190 14 246 190 14 246 190 14 246 190 14
39941-246 190 14 246 190 14 246 190 14 246 190 14
39942-246 190 14 246 190 14 242 186 14 163 133 67
39943- 46 46 46 18 18 18 6 6 6 0 0 0
39944- 0 0 0 0 0 0 0 0 0 0 0 0
39945- 0 0 0 0 0 0 0 0 0 0 0 0
39946- 0 0 0 0 0 0 0 0 0 10 10 10
39947- 30 30 30 78 78 78 163 133 67 210 150 10
39948-236 178 12 246 186 14 246 190 14 246 190 14
39949-246 190 14 246 190 14 246 190 14 246 190 14
39950-246 190 14 246 190 14 246 190 14 246 190 14
39951-246 190 14 246 190 14 246 190 14 246 190 14
39952-241 196 14 215 174 15 190 178 144 253 253 253
39953-253 253 253 253 253 253 253 253 253 253 253 253
39954-253 253 253 253 253 253 253 253 253 253 253 253
39955-253 253 253 253 253 253 253 253 253 253 253 253
39956-253 253 253 253 253 253 253 253 253 218 218 218
39957- 58 58 58 2 2 6 22 18 6 167 114 7
39958-216 158 10 236 178 12 246 186 14 246 190 14
39959-246 190 14 246 190 14 246 190 14 246 190 14
39960-246 190 14 246 190 14 246 190 14 246 190 14
39961-246 190 14 246 190 14 246 190 14 246 190 14
39962-246 190 14 246 186 14 242 186 14 190 150 46
39963- 54 54 54 22 22 22 6 6 6 0 0 0
39964- 0 0 0 0 0 0 0 0 0 0 0 0
39965- 0 0 0 0 0 0 0 0 0 0 0 0
39966- 0 0 0 0 0 0 0 0 0 14 14 14
39967- 38 38 38 86 86 86 180 133 36 213 154 11
39968-236 178 12 246 186 14 246 190 14 246 190 14
39969-246 190 14 246 190 14 246 190 14 246 190 14
39970-246 190 14 246 190 14 246 190 14 246 190 14
39971-246 190 14 246 190 14 246 190 14 246 190 14
39972-246 190 14 232 195 16 190 146 13 214 214 214
39973-253 253 253 253 253 253 253 253 253 253 253 253
39974-253 253 253 253 253 253 253 253 253 253 253 253
39975-253 253 253 253 253 253 253 253 253 253 253 253
39976-253 253 253 250 250 250 170 170 170 26 26 26
39977- 2 2 6 2 2 6 37 26 9 163 110 8
39978-219 162 10 239 182 13 246 186 14 246 190 14
39979-246 190 14 246 190 14 246 190 14 246 190 14
39980-246 190 14 246 190 14 246 190 14 246 190 14
39981-246 190 14 246 190 14 246 190 14 246 190 14
39982-246 186 14 236 178 12 224 166 10 142 122 72
39983- 46 46 46 18 18 18 6 6 6 0 0 0
39984- 0 0 0 0 0 0 0 0 0 0 0 0
39985- 0 0 0 0 0 0 0 0 0 0 0 0
39986- 0 0 0 0 0 0 6 6 6 18 18 18
39987- 50 50 50 109 106 95 192 133 9 224 166 10
39988-242 186 14 246 190 14 246 190 14 246 190 14
39989-246 190 14 246 190 14 246 190 14 246 190 14
39990-246 190 14 246 190 14 246 190 14 246 190 14
39991-246 190 14 246 190 14 246 190 14 246 190 14
39992-242 186 14 226 184 13 210 162 10 142 110 46
39993-226 226 226 253 253 253 253 253 253 253 253 253
39994-253 253 253 253 253 253 253 253 253 253 253 253
39995-253 253 253 253 253 253 253 253 253 253 253 253
39996-198 198 198 66 66 66 2 2 6 2 2 6
39997- 2 2 6 2 2 6 50 34 6 156 107 11
39998-219 162 10 239 182 13 246 186 14 246 190 14
39999-246 190 14 246 190 14 246 190 14 246 190 14
40000-246 190 14 246 190 14 246 190 14 246 190 14
40001-246 190 14 246 190 14 246 190 14 242 186 14
40002-234 174 13 213 154 11 154 122 46 66 66 66
40003- 30 30 30 10 10 10 0 0 0 0 0 0
40004- 0 0 0 0 0 0 0 0 0 0 0 0
40005- 0 0 0 0 0 0 0 0 0 0 0 0
40006- 0 0 0 0 0 0 6 6 6 22 22 22
40007- 58 58 58 154 121 60 206 145 10 234 174 13
40008-242 186 14 246 186 14 246 190 14 246 190 14
40009-246 190 14 246 190 14 246 190 14 246 190 14
40010-246 190 14 246 190 14 246 190 14 246 190 14
40011-246 190 14 246 190 14 246 190 14 246 190 14
40012-246 186 14 236 178 12 210 162 10 163 110 8
40013- 61 42 6 138 138 138 218 218 218 250 250 250
40014-253 253 253 253 253 253 253 253 253 250 250 250
40015-242 242 242 210 210 210 144 144 144 66 66 66
40016- 6 6 6 2 2 6 2 2 6 2 2 6
40017- 2 2 6 2 2 6 61 42 6 163 110 8
40018-216 158 10 236 178 12 246 190 14 246 190 14
40019-246 190 14 246 190 14 246 190 14 246 190 14
40020-246 190 14 246 190 14 246 190 14 246 190 14
40021-246 190 14 239 182 13 230 174 11 216 158 10
40022-190 142 34 124 112 88 70 70 70 38 38 38
40023- 18 18 18 6 6 6 0 0 0 0 0 0
40024- 0 0 0 0 0 0 0 0 0 0 0 0
40025- 0 0 0 0 0 0 0 0 0 0 0 0
40026- 0 0 0 0 0 0 6 6 6 22 22 22
40027- 62 62 62 168 124 44 206 145 10 224 166 10
40028-236 178 12 239 182 13 242 186 14 242 186 14
40029-246 186 14 246 190 14 246 190 14 246 190 14
40030-246 190 14 246 190 14 246 190 14 246 190 14
40031-246 190 14 246 190 14 246 190 14 246 190 14
40032-246 190 14 236 178 12 216 158 10 175 118 6
40033- 80 54 7 2 2 6 6 6 6 30 30 30
40034- 54 54 54 62 62 62 50 50 50 38 38 38
40035- 14 14 14 2 2 6 2 2 6 2 2 6
40036- 2 2 6 2 2 6 2 2 6 2 2 6
40037- 2 2 6 6 6 6 80 54 7 167 114 7
40038-213 154 11 236 178 12 246 190 14 246 190 14
40039-246 190 14 246 190 14 246 190 14 246 190 14
40040-246 190 14 242 186 14 239 182 13 239 182 13
40041-230 174 11 210 150 10 174 135 50 124 112 88
40042- 82 82 82 54 54 54 34 34 34 18 18 18
40043- 6 6 6 0 0 0 0 0 0 0 0 0
40044- 0 0 0 0 0 0 0 0 0 0 0 0
40045- 0 0 0 0 0 0 0 0 0 0 0 0
40046- 0 0 0 0 0 0 6 6 6 18 18 18
40047- 50 50 50 158 118 36 192 133 9 200 144 11
40048-216 158 10 219 162 10 224 166 10 226 170 11
40049-230 174 11 236 178 12 239 182 13 239 182 13
40050-242 186 14 246 186 14 246 190 14 246 190 14
40051-246 190 14 246 190 14 246 190 14 246 190 14
40052-246 186 14 230 174 11 210 150 10 163 110 8
40053-104 69 6 10 10 10 2 2 6 2 2 6
40054- 2 2 6 2 2 6 2 2 6 2 2 6
40055- 2 2 6 2 2 6 2 2 6 2 2 6
40056- 2 2 6 2 2 6 2 2 6 2 2 6
40057- 2 2 6 6 6 6 91 60 6 167 114 7
40058-206 145 10 230 174 11 242 186 14 246 190 14
40059-246 190 14 246 190 14 246 186 14 242 186 14
40060-239 182 13 230 174 11 224 166 10 213 154 11
40061-180 133 36 124 112 88 86 86 86 58 58 58
40062- 38 38 38 22 22 22 10 10 10 6 6 6
40063- 0 0 0 0 0 0 0 0 0 0 0 0
40064- 0 0 0 0 0 0 0 0 0 0 0 0
40065- 0 0 0 0 0 0 0 0 0 0 0 0
40066- 0 0 0 0 0 0 0 0 0 14 14 14
40067- 34 34 34 70 70 70 138 110 50 158 118 36
40068-167 114 7 180 123 7 192 133 9 197 138 11
40069-200 144 11 206 145 10 213 154 11 219 162 10
40070-224 166 10 230 174 11 239 182 13 242 186 14
40071-246 186 14 246 186 14 246 186 14 246 186 14
40072-239 182 13 216 158 10 185 133 11 152 99 6
40073-104 69 6 18 14 6 2 2 6 2 2 6
40074- 2 2 6 2 2 6 2 2 6 2 2 6
40075- 2 2 6 2 2 6 2 2 6 2 2 6
40076- 2 2 6 2 2 6 2 2 6 2 2 6
40077- 2 2 6 6 6 6 80 54 7 152 99 6
40078-192 133 9 219 162 10 236 178 12 239 182 13
40079-246 186 14 242 186 14 239 182 13 236 178 12
40080-224 166 10 206 145 10 192 133 9 154 121 60
40081- 94 94 94 62 62 62 42 42 42 22 22 22
40082- 14 14 14 6 6 6 0 0 0 0 0 0
40083- 0 0 0 0 0 0 0 0 0 0 0 0
40084- 0 0 0 0 0 0 0 0 0 0 0 0
40085- 0 0 0 0 0 0 0 0 0 0 0 0
40086- 0 0 0 0 0 0 0 0 0 6 6 6
40087- 18 18 18 34 34 34 58 58 58 78 78 78
40088-101 98 89 124 112 88 142 110 46 156 107 11
40089-163 110 8 167 114 7 175 118 6 180 123 7
40090-185 133 11 197 138 11 210 150 10 219 162 10
40091-226 170 11 236 178 12 236 178 12 234 174 13
40092-219 162 10 197 138 11 163 110 8 130 83 6
40093- 91 60 6 10 10 10 2 2 6 2 2 6
40094- 18 18 18 38 38 38 38 38 38 38 38 38
40095- 38 38 38 38 38 38 38 38 38 38 38 38
40096- 38 38 38 38 38 38 26 26 26 2 2 6
40097- 2 2 6 6 6 6 70 47 6 137 92 6
40098-175 118 6 200 144 11 219 162 10 230 174 11
40099-234 174 13 230 174 11 219 162 10 210 150 10
40100-192 133 9 163 110 8 124 112 88 82 82 82
40101- 50 50 50 30 30 30 14 14 14 6 6 6
40102- 0 0 0 0 0 0 0 0 0 0 0 0
40103- 0 0 0 0 0 0 0 0 0 0 0 0
40104- 0 0 0 0 0 0 0 0 0 0 0 0
40105- 0 0 0 0 0 0 0 0 0 0 0 0
40106- 0 0 0 0 0 0 0 0 0 0 0 0
40107- 6 6 6 14 14 14 22 22 22 34 34 34
40108- 42 42 42 58 58 58 74 74 74 86 86 86
40109-101 98 89 122 102 70 130 98 46 121 87 25
40110-137 92 6 152 99 6 163 110 8 180 123 7
40111-185 133 11 197 138 11 206 145 10 200 144 11
40112-180 123 7 156 107 11 130 83 6 104 69 6
40113- 50 34 6 54 54 54 110 110 110 101 98 89
40114- 86 86 86 82 82 82 78 78 78 78 78 78
40115- 78 78 78 78 78 78 78 78 78 78 78 78
40116- 78 78 78 82 82 82 86 86 86 94 94 94
40117-106 106 106 101 101 101 86 66 34 124 80 6
40118-156 107 11 180 123 7 192 133 9 200 144 11
40119-206 145 10 200 144 11 192 133 9 175 118 6
40120-139 102 15 109 106 95 70 70 70 42 42 42
40121- 22 22 22 10 10 10 0 0 0 0 0 0
40122- 0 0 0 0 0 0 0 0 0 0 0 0
40123- 0 0 0 0 0 0 0 0 0 0 0 0
40124- 0 0 0 0 0 0 0 0 0 0 0 0
40125- 0 0 0 0 0 0 0 0 0 0 0 0
40126- 0 0 0 0 0 0 0 0 0 0 0 0
40127- 0 0 0 0 0 0 6 6 6 10 10 10
40128- 14 14 14 22 22 22 30 30 30 38 38 38
40129- 50 50 50 62 62 62 74 74 74 90 90 90
40130-101 98 89 112 100 78 121 87 25 124 80 6
40131-137 92 6 152 99 6 152 99 6 152 99 6
40132-138 86 6 124 80 6 98 70 6 86 66 30
40133-101 98 89 82 82 82 58 58 58 46 46 46
40134- 38 38 38 34 34 34 34 34 34 34 34 34
40135- 34 34 34 34 34 34 34 34 34 34 34 34
40136- 34 34 34 34 34 34 38 38 38 42 42 42
40137- 54 54 54 82 82 82 94 86 76 91 60 6
40138-134 86 6 156 107 11 167 114 7 175 118 6
40139-175 118 6 167 114 7 152 99 6 121 87 25
40140-101 98 89 62 62 62 34 34 34 18 18 18
40141- 6 6 6 0 0 0 0 0 0 0 0 0
40142- 0 0 0 0 0 0 0 0 0 0 0 0
40143- 0 0 0 0 0 0 0 0 0 0 0 0
40144- 0 0 0 0 0 0 0 0 0 0 0 0
40145- 0 0 0 0 0 0 0 0 0 0 0 0
40146- 0 0 0 0 0 0 0 0 0 0 0 0
40147- 0 0 0 0 0 0 0 0 0 0 0 0
40148- 0 0 0 6 6 6 6 6 6 10 10 10
40149- 18 18 18 22 22 22 30 30 30 42 42 42
40150- 50 50 50 66 66 66 86 86 86 101 98 89
40151-106 86 58 98 70 6 104 69 6 104 69 6
40152-104 69 6 91 60 6 82 62 34 90 90 90
40153- 62 62 62 38 38 38 22 22 22 14 14 14
40154- 10 10 10 10 10 10 10 10 10 10 10 10
40155- 10 10 10 10 10 10 6 6 6 10 10 10
40156- 10 10 10 10 10 10 10 10 10 14 14 14
40157- 22 22 22 42 42 42 70 70 70 89 81 66
40158- 80 54 7 104 69 6 124 80 6 137 92 6
40159-134 86 6 116 81 8 100 82 52 86 86 86
40160- 58 58 58 30 30 30 14 14 14 6 6 6
40161- 0 0 0 0 0 0 0 0 0 0 0 0
40162- 0 0 0 0 0 0 0 0 0 0 0 0
40163- 0 0 0 0 0 0 0 0 0 0 0 0
40164- 0 0 0 0 0 0 0 0 0 0 0 0
40165- 0 0 0 0 0 0 0 0 0 0 0 0
40166- 0 0 0 0 0 0 0 0 0 0 0 0
40167- 0 0 0 0 0 0 0 0 0 0 0 0
40168- 0 0 0 0 0 0 0 0 0 0 0 0
40169- 0 0 0 6 6 6 10 10 10 14 14 14
40170- 18 18 18 26 26 26 38 38 38 54 54 54
40171- 70 70 70 86 86 86 94 86 76 89 81 66
40172- 89 81 66 86 86 86 74 74 74 50 50 50
40173- 30 30 30 14 14 14 6 6 6 0 0 0
40174- 0 0 0 0 0 0 0 0 0 0 0 0
40175- 0 0 0 0 0 0 0 0 0 0 0 0
40176- 0 0 0 0 0 0 0 0 0 0 0 0
40177- 6 6 6 18 18 18 34 34 34 58 58 58
40178- 82 82 82 89 81 66 89 81 66 89 81 66
40179- 94 86 66 94 86 76 74 74 74 50 50 50
40180- 26 26 26 14 14 14 6 6 6 0 0 0
40181- 0 0 0 0 0 0 0 0 0 0 0 0
40182- 0 0 0 0 0 0 0 0 0 0 0 0
40183- 0 0 0 0 0 0 0 0 0 0 0 0
40184- 0 0 0 0 0 0 0 0 0 0 0 0
40185- 0 0 0 0 0 0 0 0 0 0 0 0
40186- 0 0 0 0 0 0 0 0 0 0 0 0
40187- 0 0 0 0 0 0 0 0 0 0 0 0
40188- 0 0 0 0 0 0 0 0 0 0 0 0
40189- 0 0 0 0 0 0 0 0 0 0 0 0
40190- 6 6 6 6 6 6 14 14 14 18 18 18
40191- 30 30 30 38 38 38 46 46 46 54 54 54
40192- 50 50 50 42 42 42 30 30 30 18 18 18
40193- 10 10 10 0 0 0 0 0 0 0 0 0
40194- 0 0 0 0 0 0 0 0 0 0 0 0
40195- 0 0 0 0 0 0 0 0 0 0 0 0
40196- 0 0 0 0 0 0 0 0 0 0 0 0
40197- 0 0 0 6 6 6 14 14 14 26 26 26
40198- 38 38 38 50 50 50 58 58 58 58 58 58
40199- 54 54 54 42 42 42 30 30 30 18 18 18
40200- 10 10 10 0 0 0 0 0 0 0 0 0
40201- 0 0 0 0 0 0 0 0 0 0 0 0
40202- 0 0 0 0 0 0 0 0 0 0 0 0
40203- 0 0 0 0 0 0 0 0 0 0 0 0
40204- 0 0 0 0 0 0 0 0 0 0 0 0
40205- 0 0 0 0 0 0 0 0 0 0 0 0
40206- 0 0 0 0 0 0 0 0 0 0 0 0
40207- 0 0 0 0 0 0 0 0 0 0 0 0
40208- 0 0 0 0 0 0 0 0 0 0 0 0
40209- 0 0 0 0 0 0 0 0 0 0 0 0
40210- 0 0 0 0 0 0 0 0 0 6 6 6
40211- 6 6 6 10 10 10 14 14 14 18 18 18
40212- 18 18 18 14 14 14 10 10 10 6 6 6
40213- 0 0 0 0 0 0 0 0 0 0 0 0
40214- 0 0 0 0 0 0 0 0 0 0 0 0
40215- 0 0 0 0 0 0 0 0 0 0 0 0
40216- 0 0 0 0 0 0 0 0 0 0 0 0
40217- 0 0 0 0 0 0 0 0 0 6 6 6
40218- 14 14 14 18 18 18 22 22 22 22 22 22
40219- 18 18 18 14 14 14 10 10 10 6 6 6
40220- 0 0 0 0 0 0 0 0 0 0 0 0
40221- 0 0 0 0 0 0 0 0 0 0 0 0
40222- 0 0 0 0 0 0 0 0 0 0 0 0
40223- 0 0 0 0 0 0 0 0 0 0 0 0
40224- 0 0 0 0 0 0 0 0 0 0 0 0
40225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40238+4 4 4 4 4 4
40239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40252+4 4 4 4 4 4
40253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40266+4 4 4 4 4 4
40267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280+4 4 4 4 4 4
40281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294+4 4 4 4 4 4
40295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308+4 4 4 4 4 4
40309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40313+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40314+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40318+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40319+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40320+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322+4 4 4 4 4 4
40323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40327+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40328+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40329+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40332+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40333+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40334+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40335+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40336+4 4 4 4 4 4
40337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40341+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40342+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40343+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40346+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40347+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40348+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40349+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40350+4 4 4 4 4 4
40351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40354+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40355+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40356+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40357+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40359+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40360+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40361+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40362+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40363+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40364+4 4 4 4 4 4
40365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40368+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40369+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40370+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40371+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40372+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40373+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40374+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40375+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40376+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40377+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40378+4 4 4 4 4 4
40379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40382+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40383+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40384+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40385+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40386+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40387+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40388+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40389+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40390+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40391+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40392+4 4 4 4 4 4
40393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40395+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40396+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40397+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40398+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40399+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40400+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40401+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40402+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40403+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40404+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40405+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40406+4 4 4 4 4 4
40407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40409+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40410+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40411+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40412+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40413+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40414+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40415+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40416+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40417+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40418+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40419+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40420+4 4 4 4 4 4
40421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40423+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40424+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40425+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40426+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40427+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40428+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40429+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40430+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40431+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40432+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40433+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40434+4 4 4 4 4 4
40435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40437+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40438+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40439+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40440+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40441+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40442+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40443+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40444+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40445+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40446+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40447+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40448+4 4 4 4 4 4
40449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40450+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40451+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40452+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40453+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40454+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40455+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40456+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40457+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40458+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40459+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40460+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40461+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40462+4 4 4 4 4 4
40463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40464+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40465+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40466+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40467+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40468+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40469+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40470+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40471+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40472+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40473+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40474+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40475+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40476+0 0 0 4 4 4
40477+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40478+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40479+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40480+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40481+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40482+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40483+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40484+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40485+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40486+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40487+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40488+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40489+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40490+2 0 0 0 0 0
40491+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40492+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40493+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40494+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40495+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40496+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40497+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40498+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40499+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40500+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40501+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40502+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40503+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40504+37 38 37 0 0 0
40505+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40506+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40507+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40508+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40509+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40510+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40511+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40512+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40513+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40514+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40515+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40516+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40517+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40518+85 115 134 4 0 0
40519+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40520+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40521+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40522+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40523+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40524+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40525+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40526+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40527+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40528+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40529+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40530+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40531+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40532+60 73 81 4 0 0
40533+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40534+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40535+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40536+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40537+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40538+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40539+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40540+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40541+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40542+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40543+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40544+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40545+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40546+16 19 21 4 0 0
40547+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40548+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40549+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40550+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40551+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40552+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40553+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40554+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40555+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40556+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40557+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40558+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40559+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40560+4 0 0 4 3 3
40561+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40562+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40563+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40565+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40566+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40567+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40568+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40569+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40570+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40571+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40572+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40573+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40574+3 2 2 4 4 4
40575+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40576+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40577+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40578+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40579+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40580+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40581+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40582+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40583+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40584+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40585+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40586+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40587+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40588+4 4 4 4 4 4
40589+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40590+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40591+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40592+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40593+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40594+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40595+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40596+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40597+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40598+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40599+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40600+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40601+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40602+4 4 4 4 4 4
40603+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40604+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40605+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40606+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40607+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40608+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40609+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40610+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40611+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40612+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40613+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40614+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40615+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40616+5 5 5 5 5 5
40617+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40618+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40619+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40620+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40621+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40622+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40623+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40624+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40625+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40626+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40627+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40628+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40629+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40630+5 5 5 4 4 4
40631+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40632+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40633+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40634+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40635+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40636+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40637+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40638+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40639+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40640+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40641+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40642+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40644+4 4 4 4 4 4
40645+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40646+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40647+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40648+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40649+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40650+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40651+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40652+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40653+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40654+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40655+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40656+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40658+4 4 4 4 4 4
40659+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40660+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40661+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40662+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40663+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40664+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40665+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40666+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40667+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40668+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40669+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40672+4 4 4 4 4 4
40673+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40674+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40675+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40676+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40677+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40678+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40679+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40680+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40681+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40682+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40683+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40686+4 4 4 4 4 4
40687+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40688+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
[… several hundred further added lines of raw RGB pixel triplets (image data carried by the patch) omitted …]
41345diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41346index 087fc99..f85ed76 100644
41347--- a/drivers/video/udlfb.c
41348+++ b/drivers/video/udlfb.c
41349@@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41350 dlfb_urb_completion(urb);
41351
41352 error:
41353- atomic_add(bytes_sent, &dev->bytes_sent);
41354- atomic_add(bytes_identical, &dev->bytes_identical);
41355- atomic_add(width*height*2, &dev->bytes_rendered);
41356+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41357+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41358+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41359 end_cycles = get_cycles();
41360- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41361+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41362 >> 10)), /* Kcycles */
41363 &dev->cpu_kcycles_used);
41364
41365@@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41366 dlfb_urb_completion(urb);
41367
41368 error:
41369- atomic_add(bytes_sent, &dev->bytes_sent);
41370- atomic_add(bytes_identical, &dev->bytes_identical);
41371- atomic_add(bytes_rendered, &dev->bytes_rendered);
41372+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41373+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41374+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41375 end_cycles = get_cycles();
41376- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41377+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41378 >> 10)), /* Kcycles */
41379 &dev->cpu_kcycles_used);
41380 }
41381@@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41382 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41383 struct dlfb_data *dev = fb_info->par;
41384 return snprintf(buf, PAGE_SIZE, "%u\n",
41385- atomic_read(&dev->bytes_rendered));
41386+ atomic_read_unchecked(&dev->bytes_rendered));
41387 }
41388
41389 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41390@@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41391 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41392 struct dlfb_data *dev = fb_info->par;
41393 return snprintf(buf, PAGE_SIZE, "%u\n",
41394- atomic_read(&dev->bytes_identical));
41395+ atomic_read_unchecked(&dev->bytes_identical));
41396 }
41397
41398 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41399@@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41400 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41401 struct dlfb_data *dev = fb_info->par;
41402 return snprintf(buf, PAGE_SIZE, "%u\n",
41403- atomic_read(&dev->bytes_sent));
41404+ atomic_read_unchecked(&dev->bytes_sent));
41405 }
41406
41407 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41408@@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41409 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41410 struct dlfb_data *dev = fb_info->par;
41411 return snprintf(buf, PAGE_SIZE, "%u\n",
41412- atomic_read(&dev->cpu_kcycles_used));
41413+ atomic_read_unchecked(&dev->cpu_kcycles_used));
41414 }
41415
41416 static ssize_t edid_show(
41417@@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41418 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41419 struct dlfb_data *dev = fb_info->par;
41420
41421- atomic_set(&dev->bytes_rendered, 0);
41422- atomic_set(&dev->bytes_identical, 0);
41423- atomic_set(&dev->bytes_sent, 0);
41424- atomic_set(&dev->cpu_kcycles_used, 0);
41425+ atomic_set_unchecked(&dev->bytes_rendered, 0);
41426+ atomic_set_unchecked(&dev->bytes_identical, 0);
41427+ atomic_set_unchecked(&dev->bytes_sent, 0);
41428+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41429
41430 return count;
41431 }
41432diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41433index 7f8472c..9842e87 100644
41434--- a/drivers/video/uvesafb.c
41435+++ b/drivers/video/uvesafb.c
41436@@ -19,6 +19,7 @@
41437 #include <linux/io.h>
41438 #include <linux/mutex.h>
41439 #include <linux/slab.h>
41440+#include <linux/moduleloader.h>
41441 #include <video/edid.h>
41442 #include <video/uvesafb.h>
41443 #ifdef CONFIG_X86
41444@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
41445 NULL,
41446 };
41447
41448- return call_usermodehelper(v86d_path, argv, envp, 1);
41449+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
41450 }
41451
41452 /*
41453@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41454 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41455 par->pmi_setpal = par->ypan = 0;
41456 } else {
41457+
41458+#ifdef CONFIG_PAX_KERNEXEC
41459+#ifdef CONFIG_MODULES
41460+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41461+#endif
41462+ if (!par->pmi_code) {
41463+ par->pmi_setpal = par->ypan = 0;
41464+ return 0;
41465+ }
41466+#endif
41467+
41468 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41469 + task->t.regs.edi);
41470+
41471+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41472+ pax_open_kernel();
41473+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41474+ pax_close_kernel();
41475+
41476+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41477+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41478+#else
41479 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41480 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41481+#endif
41482+
41483 printk(KERN_INFO "uvesafb: protected mode interface info at "
41484 "%04x:%04x\n",
41485 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41486@@ -1821,6 +1844,11 @@ out:
41487 if (par->vbe_modes)
41488 kfree(par->vbe_modes);
41489
41490+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41491+ if (par->pmi_code)
41492+ module_free_exec(NULL, par->pmi_code);
41493+#endif
41494+
41495 framebuffer_release(info);
41496 return err;
41497 }
41498@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
41499 kfree(par->vbe_state_orig);
41500 if (par->vbe_state_saved)
41501 kfree(par->vbe_state_saved);
41502+
41503+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41504+ if (par->pmi_code)
41505+ module_free_exec(NULL, par->pmi_code);
41506+#endif
41507+
41508 }
41509
41510 framebuffer_release(info);
41511diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41512index 501b340..86bd4cf 100644
41513--- a/drivers/video/vesafb.c
41514+++ b/drivers/video/vesafb.c
41515@@ -9,6 +9,7 @@
41516 */
41517
41518 #include <linux/module.h>
41519+#include <linux/moduleloader.h>
41520 #include <linux/kernel.h>
41521 #include <linux/errno.h>
41522 #include <linux/string.h>
41523@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41524 static int vram_total __initdata; /* Set total amount of memory */
41525 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41526 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41527-static void (*pmi_start)(void) __read_mostly;
41528-static void (*pmi_pal) (void) __read_mostly;
41529+static void (*pmi_start)(void) __read_only;
41530+static void (*pmi_pal) (void) __read_only;
41531 static int depth __read_mostly;
41532 static int vga_compat __read_mostly;
41533 /* --------------------------------------------------------------------- */
41534@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41535 unsigned int size_vmode;
41536 unsigned int size_remap;
41537 unsigned int size_total;
41538+ void *pmi_code = NULL;
41539
41540 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41541 return -ENODEV;
41542@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41543 size_remap = size_total;
41544 vesafb_fix.smem_len = size_remap;
41545
41546-#ifndef __i386__
41547- screen_info.vesapm_seg = 0;
41548-#endif
41549-
41550 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41551 printk(KERN_WARNING
41552 "vesafb: cannot reserve video memory at 0x%lx\n",
41553@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41554 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41555 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41556
41557+#ifdef __i386__
41558+
41559+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41560+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
41561+ if (!pmi_code)
41562+#elif !defined(CONFIG_PAX_KERNEXEC)
41563+ if (0)
41564+#endif
41565+
41566+#endif
41567+ screen_info.vesapm_seg = 0;
41568+
41569 if (screen_info.vesapm_seg) {
41570- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41571- screen_info.vesapm_seg,screen_info.vesapm_off);
41572+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41573+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41574 }
41575
41576 if (screen_info.vesapm_seg < 0xc000)
41577@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41578
41579 if (ypan || pmi_setpal) {
41580 unsigned short *pmi_base;
41581+
41582 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41583- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41584- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41585+
41586+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41587+ pax_open_kernel();
41588+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41589+#else
41590+ pmi_code = pmi_base;
41591+#endif
41592+
41593+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41594+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41595+
41596+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41597+ pmi_start = ktva_ktla(pmi_start);
41598+ pmi_pal = ktva_ktla(pmi_pal);
41599+ pax_close_kernel();
41600+#endif
41601+
41602 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41603 if (pmi_base[3]) {
41604 printk(KERN_INFO "vesafb: pmi: ports = ");
41605@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41606 info->node, info->fix.id);
41607 return 0;
41608 err:
41609+
41610+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41611+ module_free_exec(NULL, pmi_code);
41612+#endif
41613+
41614 if (info->screen_base)
41615 iounmap(info->screen_base);
41616 framebuffer_release(info);
41617diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41618index 88714ae..16c2e11 100644
41619--- a/drivers/video/via/via_clock.h
41620+++ b/drivers/video/via/via_clock.h
41621@@ -56,7 +56,7 @@ struct via_clock {
41622
41623 void (*set_engine_pll_state)(u8 state);
41624 void (*set_engine_pll)(struct via_pll_config config);
41625-};
41626+} __no_const;
41627
41628
41629 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41630diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
41631index e058ace..2424d93 100644
41632--- a/drivers/virtio/virtio_balloon.c
41633+++ b/drivers/virtio/virtio_balloon.c
41634@@ -174,6 +174,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
41635 struct sysinfo i;
41636 int idx = 0;
41637
41638+ pax_track_stack();
41639+
41640 all_vm_events(events);
41641 si_meminfo(&i);
41642
41643diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41644index e56c934..fc22f4b 100644
41645--- a/drivers/xen/xen-pciback/conf_space.h
41646+++ b/drivers/xen/xen-pciback/conf_space.h
41647@@ -44,15 +44,15 @@ struct config_field {
41648 struct {
41649 conf_dword_write write;
41650 conf_dword_read read;
41651- } dw;
41652+ } __no_const dw;
41653 struct {
41654 conf_word_write write;
41655 conf_word_read read;
41656- } w;
41657+ } __no_const w;
41658 struct {
41659 conf_byte_write write;
41660 conf_byte_read read;
41661- } b;
41662+ } __no_const b;
41663 } u;
41664 struct list_head list;
41665 };
41666diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41667index e3c03db..93b0172 100644
41668--- a/fs/9p/vfs_inode.c
41669+++ b/fs/9p/vfs_inode.c
41670@@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41671 void
41672 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41673 {
41674- char *s = nd_get_link(nd);
41675+ const char *s = nd_get_link(nd);
41676
41677 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
41678 IS_ERR(s) ? "<error>" : s);
41679diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41680index 79e2ca7..5828ad1 100644
41681--- a/fs/Kconfig.binfmt
41682+++ b/fs/Kconfig.binfmt
41683@@ -86,7 +86,7 @@ config HAVE_AOUT
41684
41685 config BINFMT_AOUT
41686 tristate "Kernel support for a.out and ECOFF binaries"
41687- depends on HAVE_AOUT
41688+ depends on HAVE_AOUT && BROKEN
41689 ---help---
41690 A.out (Assembler.OUTput) is a set of formats for libraries and
41691 executables used in the earliest versions of UNIX. Linux used
41692diff --git a/fs/aio.c b/fs/aio.c
41693index e29ec48..f083e5e 100644
41694--- a/fs/aio.c
41695+++ b/fs/aio.c
41696@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41697 size += sizeof(struct io_event) * nr_events;
41698 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41699
41700- if (nr_pages < 0)
41701+ if (nr_pages <= 0)
41702 return -EINVAL;
41703
41704 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41705@@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ctx,
41706 struct aio_timeout to;
41707 int retry = 0;
41708
41709+ pax_track_stack();
41710+
41711 /* needed to zero any padding within an entry (there shouldn't be
41712 * any, but C is fun!
41713 */
41714@@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41715 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41716 {
41717 ssize_t ret;
41718+ struct iovec iovstack;
41719
41720 #ifdef CONFIG_COMPAT
41721 if (compat)
41722 ret = compat_rw_copy_check_uvector(type,
41723 (struct compat_iovec __user *)kiocb->ki_buf,
41724- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41725+ kiocb->ki_nbytes, 1, &iovstack,
41726 &kiocb->ki_iovec);
41727 else
41728 #endif
41729 ret = rw_copy_check_uvector(type,
41730 (struct iovec __user *)kiocb->ki_buf,
41731- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41732+ kiocb->ki_nbytes, 1, &iovstack,
41733 &kiocb->ki_iovec);
41734 if (ret < 0)
41735 goto out;
41736
41737+ if (kiocb->ki_iovec == &iovstack) {
41738+ kiocb->ki_inline_vec = iovstack;
41739+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
41740+ }
41741 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41742 kiocb->ki_cur_seg = 0;
41743 /* ki_nbytes/left now reflect bytes instead of segs */
41744diff --git a/fs/attr.c b/fs/attr.c
41745index 538e279..046cc6d 100644
41746--- a/fs/attr.c
41747+++ b/fs/attr.c
41748@@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41749 unsigned long limit;
41750
41751 limit = rlimit(RLIMIT_FSIZE);
41752+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41753 if (limit != RLIM_INFINITY && offset > limit)
41754 goto out_sig;
41755 if (offset > inode->i_sb->s_maxbytes)
41756diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41757index e1fbdee..cd5ea56 100644
41758--- a/fs/autofs4/waitq.c
41759+++ b/fs/autofs4/waitq.c
41760@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
41761 {
41762 unsigned long sigpipe, flags;
41763 mm_segment_t fs;
41764- const char *data = (const char *)addr;
41765+ const char __user *data = (const char __force_user *)addr;
41766 ssize_t wr = 0;
41767
41768 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
41769diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41770index 720d885..012e7f0 100644
41771--- a/fs/befs/linuxvfs.c
41772+++ b/fs/befs/linuxvfs.c
41773@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41774 {
41775 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41776 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41777- char *link = nd_get_link(nd);
41778+ const char *link = nd_get_link(nd);
41779 if (!IS_ERR(link))
41780 kfree(link);
41781 }
41782diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41783index a6395bd..a5b24c4 100644
41784--- a/fs/binfmt_aout.c
41785+++ b/fs/binfmt_aout.c
41786@@ -16,6 +16,7 @@
41787 #include <linux/string.h>
41788 #include <linux/fs.h>
41789 #include <linux/file.h>
41790+#include <linux/security.h>
41791 #include <linux/stat.h>
41792 #include <linux/fcntl.h>
41793 #include <linux/ptrace.h>
41794@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41795 #endif
41796 # define START_STACK(u) ((void __user *)u.start_stack)
41797
41798+ memset(&dump, 0, sizeof(dump));
41799+
41800 fs = get_fs();
41801 set_fs(KERNEL_DS);
41802 has_dumped = 1;
41803@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41804
41805 /* If the size of the dump file exceeds the rlimit, then see what would happen
41806 if we wrote the stack, but not the data area. */
41807+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41808 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41809 dump.u_dsize = 0;
41810
41811 /* Make sure we have enough room to write the stack and data areas. */
41812+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41813 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41814 dump.u_ssize = 0;
41815
41816@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41817 rlim = rlimit(RLIMIT_DATA);
41818 if (rlim >= RLIM_INFINITY)
41819 rlim = ~0;
41820+
41821+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41822 if (ex.a_data + ex.a_bss > rlim)
41823 return -ENOMEM;
41824
41825@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41826 install_exec_creds(bprm);
41827 current->flags &= ~PF_FORKNOEXEC;
41828
41829+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41830+ current->mm->pax_flags = 0UL;
41831+#endif
41832+
41833+#ifdef CONFIG_PAX_PAGEEXEC
41834+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41835+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41836+
41837+#ifdef CONFIG_PAX_EMUTRAMP
41838+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41839+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41840+#endif
41841+
41842+#ifdef CONFIG_PAX_MPROTECT
41843+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41844+ current->mm->pax_flags |= MF_PAX_MPROTECT;
41845+#endif
41846+
41847+ }
41848+#endif
41849+
41850 if (N_MAGIC(ex) == OMAGIC) {
41851 unsigned long text_addr, map_size;
41852 loff_t pos;
41853@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41854
41855 down_write(&current->mm->mmap_sem);
41856 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41857- PROT_READ | PROT_WRITE | PROT_EXEC,
41858+ PROT_READ | PROT_WRITE,
41859 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41860 fd_offset + ex.a_text);
41861 up_write(&current->mm->mmap_sem);
41862diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41863index 21ac5ee..171b1d0 100644
41864--- a/fs/binfmt_elf.c
41865+++ b/fs/binfmt_elf.c
41866@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41867 #define elf_core_dump NULL
41868 #endif
41869
41870+#ifdef CONFIG_PAX_MPROTECT
41871+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41872+#endif
41873+
41874 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41875 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41876 #else
41877@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format = {
41878 .load_binary = load_elf_binary,
41879 .load_shlib = load_elf_library,
41880 .core_dump = elf_core_dump,
41881+
41882+#ifdef CONFIG_PAX_MPROTECT
41883+ .handle_mprotect= elf_handle_mprotect,
41884+#endif
41885+
41886 .min_coredump = ELF_EXEC_PAGESIZE,
41887 };
41888
41889@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = {
41890
41891 static int set_brk(unsigned long start, unsigned long end)
41892 {
41893+ unsigned long e = end;
41894+
41895 start = ELF_PAGEALIGN(start);
41896 end = ELF_PAGEALIGN(end);
41897 if (end > start) {
41898@@ -87,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41899 if (BAD_ADDR(addr))
41900 return addr;
41901 }
41902- current->mm->start_brk = current->mm->brk = end;
41903+ current->mm->start_brk = current->mm->brk = e;
41904 return 0;
41905 }
41906
41907@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41908 elf_addr_t __user *u_rand_bytes;
41909 const char *k_platform = ELF_PLATFORM;
41910 const char *k_base_platform = ELF_BASE_PLATFORM;
41911- unsigned char k_rand_bytes[16];
41912+ u32 k_rand_bytes[4];
41913 int items;
41914 elf_addr_t *elf_info;
41915 int ei_index = 0;
41916 const struct cred *cred = current_cred();
41917 struct vm_area_struct *vma;
41918+ unsigned long saved_auxv[AT_VECTOR_SIZE];
41919+
41920+ pax_track_stack();
41921
41922 /*
41923 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41924@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41925 * Generate 16 random bytes for userspace PRNG seeding.
41926 */
41927 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41928- u_rand_bytes = (elf_addr_t __user *)
41929- STACK_ALLOC(p, sizeof(k_rand_bytes));
41930+ srandom32(k_rand_bytes[0] ^ random32());
41931+ srandom32(k_rand_bytes[1] ^ random32());
41932+ srandom32(k_rand_bytes[2] ^ random32());
41933+ srandom32(k_rand_bytes[3] ^ random32());
41934+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
41935+ u_rand_bytes = (elf_addr_t __user *) p;
41936 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41937 return -EFAULT;
41938
41939@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41940 return -EFAULT;
41941 current->mm->env_end = p;
41942
41943+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41944+
41945 /* Put the elf_info on the stack in the right place. */
41946 sp = (elf_addr_t __user *)envp + 1;
41947- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41948+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41949 return -EFAULT;
41950 return 0;
41951 }
41952@@ -381,10 +401,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41953 {
41954 struct elf_phdr *elf_phdata;
41955 struct elf_phdr *eppnt;
41956- unsigned long load_addr = 0;
41957+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41958 int load_addr_set = 0;
41959 unsigned long last_bss = 0, elf_bss = 0;
41960- unsigned long error = ~0UL;
41961+ unsigned long error = -EINVAL;
41962 unsigned long total_size;
41963 int retval, i, size;
41964
41965@@ -430,6 +450,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41966 goto out_close;
41967 }
41968
41969+#ifdef CONFIG_PAX_SEGMEXEC
41970+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41971+ pax_task_size = SEGMEXEC_TASK_SIZE;
41972+#endif
41973+
41974 eppnt = elf_phdata;
41975 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41976 if (eppnt->p_type == PT_LOAD) {
41977@@ -473,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41978 k = load_addr + eppnt->p_vaddr;
41979 if (BAD_ADDR(k) ||
41980 eppnt->p_filesz > eppnt->p_memsz ||
41981- eppnt->p_memsz > TASK_SIZE ||
41982- TASK_SIZE - eppnt->p_memsz < k) {
41983+ eppnt->p_memsz > pax_task_size ||
41984+ pax_task_size - eppnt->p_memsz < k) {
41985 error = -ENOMEM;
41986 goto out_close;
41987 }
41988@@ -528,6 +553,193 @@ out:
41989 return error;
41990 }
41991
41992+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
41993+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
41994+{
41995+ unsigned long pax_flags = 0UL;
41996+
41997+#ifdef CONFIG_PAX_PAGEEXEC
41998+ if (elf_phdata->p_flags & PF_PAGEEXEC)
41999+ pax_flags |= MF_PAX_PAGEEXEC;
42000+#endif
42001+
42002+#ifdef CONFIG_PAX_SEGMEXEC
42003+ if (elf_phdata->p_flags & PF_SEGMEXEC)
42004+ pax_flags |= MF_PAX_SEGMEXEC;
42005+#endif
42006+
42007+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42008+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42009+ if ((__supported_pte_mask & _PAGE_NX))
42010+ pax_flags &= ~MF_PAX_SEGMEXEC;
42011+ else
42012+ pax_flags &= ~MF_PAX_PAGEEXEC;
42013+ }
42014+#endif
42015+
42016+#ifdef CONFIG_PAX_EMUTRAMP
42017+ if (elf_phdata->p_flags & PF_EMUTRAMP)
42018+ pax_flags |= MF_PAX_EMUTRAMP;
42019+#endif
42020+
42021+#ifdef CONFIG_PAX_MPROTECT
42022+ if (elf_phdata->p_flags & PF_MPROTECT)
42023+ pax_flags |= MF_PAX_MPROTECT;
42024+#endif
42025+
42026+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42027+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42028+ pax_flags |= MF_PAX_RANDMMAP;
42029+#endif
42030+
42031+ return pax_flags;
42032+}
42033+#endif
42034+
42035+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42036+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
42037+{
42038+ unsigned long pax_flags = 0UL;
42039+
42040+#ifdef CONFIG_PAX_PAGEEXEC
42041+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42042+ pax_flags |= MF_PAX_PAGEEXEC;
42043+#endif
42044+
42045+#ifdef CONFIG_PAX_SEGMEXEC
42046+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42047+ pax_flags |= MF_PAX_SEGMEXEC;
42048+#endif
42049+
42050+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42051+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
ae4e228f 42052+ if ((__supported_pte_mask & _PAGE_NX))
42053+ pax_flags &= ~MF_PAX_SEGMEXEC;
42054+ else
42055+ pax_flags &= ~MF_PAX_PAGEEXEC;
42056+ }
42057+#endif
42058+
42059+#ifdef CONFIG_PAX_EMUTRAMP
42060+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42061+ pax_flags |= MF_PAX_EMUTRAMP;
42062+#endif
42063+
42064+#ifdef CONFIG_PAX_MPROTECT
42065+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42066+ pax_flags |= MF_PAX_MPROTECT;
42067+#endif
42068+
42069+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42070+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42071+ pax_flags |= MF_PAX_RANDMMAP;
42072+#endif
42073+
42074+ return pax_flags;
42075+}
42076+#endif
42077+
42078+#ifdef CONFIG_PAX_EI_PAX
42079+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42080+{
42081+ unsigned long pax_flags = 0UL;
42082+
42083+#ifdef CONFIG_PAX_PAGEEXEC
42084+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42085+ pax_flags |= MF_PAX_PAGEEXEC;
42086+#endif
42087+
42088+#ifdef CONFIG_PAX_SEGMEXEC
42089+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42090+ pax_flags |= MF_PAX_SEGMEXEC;
42091+#endif
42092+
42093+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42094+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
ae4e228f 42095+ if ((__supported_pte_mask & _PAGE_NX))
42096+ pax_flags &= ~MF_PAX_SEGMEXEC;
42097+ else
42098+ pax_flags &= ~MF_PAX_PAGEEXEC;
42099+ }
42100+#endif
42101+
42102+#ifdef CONFIG_PAX_EMUTRAMP
42103+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42104+ pax_flags |= MF_PAX_EMUTRAMP;
42105+#endif
42106+
42107+#ifdef CONFIG_PAX_MPROTECT
42108+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42109+ pax_flags |= MF_PAX_MPROTECT;
42110+#endif
42111+
42112+#ifdef CONFIG_PAX_ASLR
42113+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42114+ pax_flags |= MF_PAX_RANDMMAP;
42115+#endif
42116+
42117+ return pax_flags;
42118+}
42119+#endif
42120+
42121+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
42122+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42123+{
42124+ unsigned long pax_flags = 0UL;
42125+
42126+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42127+ unsigned long i;
71d190be 42128+ int found_flags = 0;
42129+#endif
42130+
42131+#ifdef CONFIG_PAX_EI_PAX
42132+ pax_flags = pax_parse_ei_pax(elf_ex);
42133+#endif
42134+
42135+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42136+ for (i = 0UL; i < elf_ex->e_phnum; i++)
42137+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42138+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42139+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42140+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42141+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42142+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42143+ return -EINVAL;
42144+
42145+#ifdef CONFIG_PAX_SOFTMODE
42146+ if (pax_softmode)
42147+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
42148+ else
42149+#endif
42150+
42151+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
71d190be 42152+ found_flags = 1;
42153+ break;
42154+ }
42155+#endif
42156+
42157+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
42158+ if (found_flags == 0) {
42159+ struct elf_phdr phdr;
42160+ memset(&phdr, 0, sizeof(phdr));
42161+ phdr.p_flags = PF_NOEMUTRAMP;
42162+#ifdef CONFIG_PAX_SOFTMODE
42163+ if (pax_softmode)
42164+ pax_flags = pax_parse_softmode(&phdr);
42165+ else
42166+#endif
42167+ pax_flags = pax_parse_hardmode(&phdr);
42168+ }
42169+#endif
42170+
42171+ if (0 > pax_check_flags(&pax_flags))
42172+ return -EINVAL;
42173+
42174+ current->mm->pax_flags = pax_flags;
42175+ return 0;
42176+}
42177+#endif
42178+
42179 /*
42180 * These are the functions used to load ELF style executables and shared
42181 * libraries. There is no binary dependent code anywhere else.
fe2de317 42182@@ -544,6 +756,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42183 {
42184 unsigned int random_variable = 0;
42185
42186+#ifdef CONFIG_PAX_RANDUSTACK
42187+ if (randomize_va_space)
42188+ return stack_top - current->mm->delta_stack;
42189+#endif
42190+
42191 if ((current->flags & PF_RANDOMIZE) &&
42192 !(current->personality & ADDR_NO_RANDOMIZE)) {
42193 random_variable = get_random_int() & STACK_RND_MASK;
fe2de317 42194@@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42195 unsigned long load_addr = 0, load_bias = 0;
42196 int load_addr_set = 0;
42197 char * elf_interpreter = NULL;
42198- unsigned long error;
42199+ unsigned long error = 0;
42200 struct elf_phdr *elf_ppnt, *elf_phdata;
42201 unsigned long elf_bss, elf_brk;
42202 int retval, i;
fe2de317 42203@@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
58c5fc13 42204 unsigned long start_code, end_code, start_data, end_data;
66a7e928 42205 unsigned long reloc_func_desc __maybe_unused = 0;
42206 int executable_stack = EXSTACK_DEFAULT;
42207- unsigned long def_flags = 0;
42208 struct {
42209 struct elfhdr elf_ex;
42210 struct elfhdr interp_elf_ex;
42211 } *loc;
42212+ unsigned long pax_task_size = TASK_SIZE;
42213
42214 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42215 if (!loc) {
fe2de317 42216@@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42217
42218 /* OK, This is the point of no return */
42219 current->flags &= ~PF_FORKNOEXEC;
42220- current->mm->def_flags = def_flags;
42221+
42222+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42223+ current->mm->pax_flags = 0UL;
42224+#endif
42225+
42226+#ifdef CONFIG_PAX_DLRESOLVE
42227+ current->mm->call_dl_resolve = 0UL;
42228+#endif
42229+
42230+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42231+ current->mm->call_syscall = 0UL;
42232+#endif
42233+
42234+#ifdef CONFIG_PAX_ASLR
42235+ current->mm->delta_mmap = 0UL;
42236+ current->mm->delta_stack = 0UL;
42237+#endif
42238+
42239+ current->mm->def_flags = 0;
42240+
42241+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
42242+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
42243+ send_sig(SIGKILL, current, 0);
42244+ goto out_free_dentry;
42245+ }
42246+#endif
42247+
42248+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42249+ pax_set_initial_flags(bprm);
42250+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42251+ if (pax_set_initial_flags_func)
42252+ (pax_set_initial_flags_func)(bprm);
42253+#endif
42254+
42255+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
ae4e228f 42256+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42257+ current->mm->context.user_cs_limit = PAGE_SIZE;
42258+ current->mm->def_flags |= VM_PAGEEXEC;
42259+ }
42260+#endif
42261+
42262+#ifdef CONFIG_PAX_SEGMEXEC
42263+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42264+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42265+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42266+ pax_task_size = SEGMEXEC_TASK_SIZE;
66a7e928 42267+ current->mm->def_flags |= VM_NOHUGEPAGE;
42268+ }
42269+#endif
42270+
42271+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42272+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42273+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42274+ put_cpu();
42275+ }
42276+#endif
42277
42278 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42279 may depend on the personality. */
42280 SET_PERSONALITY(loc->elf_ex);
42281+
42282+#ifdef CONFIG_PAX_ASLR
42283+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42284+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42285+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42286+ }
42287+#endif
42288+
42289+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42290+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42291+ executable_stack = EXSTACK_DISABLE_X;
42292+ current->personality &= ~READ_IMPLIES_EXEC;
42293+ } else
42294+#endif
42295+
42296 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42297 current->personality |= READ_IMPLIES_EXEC;
42298
fe2de317 42299@@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42300 #else
42301 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42302 #endif
42303+
42304+#ifdef CONFIG_PAX_RANDMMAP
42305+ /* PaX: randomize base address at the default exe base if requested */
42306+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42307+#ifdef CONFIG_SPARC64
42308+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42309+#else
42310+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42311+#endif
42312+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42313+ elf_flags |= MAP_FIXED;
42314+ }
42315+#endif
42316+
42317 }
42318
42319 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
fe2de317 42320@@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42321 * allowed task size. Note that p_filesz must always be
42322 * <= p_memsz so it is only necessary to check p_memsz.
42323 */
42324- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42325- elf_ppnt->p_memsz > TASK_SIZE ||
42326- TASK_SIZE - elf_ppnt->p_memsz < k) {
42327+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42328+ elf_ppnt->p_memsz > pax_task_size ||
42329+ pax_task_size - elf_ppnt->p_memsz < k) {
42330 /* set_brk can never work. Avoid overflows. */
42331 send_sig(SIGKILL, current, 0);
42332 retval = -EINVAL;
fe2de317 42333@@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42334 start_data += load_bias;
42335 end_data += load_bias;
42336
42337+#ifdef CONFIG_PAX_RANDMMAP
42338+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
42339+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
42340+#endif
42341+
42342 /* Calling set_brk effectively mmaps the pages that we need
42343 * for the bss and break sections. We must do this before
42344 * mapping in the interpreter, to make sure it doesn't wind
fe2de317 42345@@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42346 goto out_free_dentry;
42347 }
42348 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42349- send_sig(SIGSEGV, current, 0);
42350- retval = -EFAULT; /* Nobody gets to see this, but.. */
42351- goto out_free_dentry;
42352+ /*
42353+ * This bss-zeroing can fail if the ELF
42354+ * file specifies odd protections. So
42355+ * we don't check the return value
42356+ */
42357 }
42358
42359 if (elf_interpreter) {
6e9df6a3 42360@@ -1098,7 +1406,7 @@ out:
42361 * Decide what to dump of a segment, part, all or none.
42362 */
42363 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42364- unsigned long mm_flags)
42365+ unsigned long mm_flags, long signr)
42366 {
42367 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42368
fe2de317 42369@@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42370 if (vma->vm_file == NULL)
42371 return 0;
42372
42373- if (FILTER(MAPPED_PRIVATE))
42374+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42375 goto whole;
42376
42377 /*
fe2de317 42378@@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42379 {
42380 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42381 int i = 0;
42382- do
42383+ do {
42384 i += 2;
42385- while (auxv[i - 2] != AT_NULL);
42386+ } while (auxv[i - 2] != AT_NULL);
42387 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42388 }
42389
fe2de317 42390@@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42391 }
42392
42393 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42394- unsigned long mm_flags)
42395+ struct coredump_params *cprm)
42396 {
42397 struct vm_area_struct *vma;
42398 size_t size = 0;
42399
42400 for (vma = first_vma(current, gate_vma); vma != NULL;
42401 vma = next_vma(vma, gate_vma))
42402- size += vma_dump_size(vma, mm_flags);
42403+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42404 return size;
42405 }
42406
fe2de317 42407@@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42408
42409 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42410
42411- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42412+ offset += elf_core_vma_data_size(gate_vma, cprm);
42413 offset += elf_core_extra_data_size();
42414 e_shoff = offset;
42415
fe2de317 42416@@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42417 offset = dataoff;
42418
42419 size += sizeof(*elf);
42420+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42421 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42422 goto end_coredump;
42423
42424 size += sizeof(*phdr4note);
42425+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42426 if (size > cprm->limit
42427 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42428 goto end_coredump;
fe2de317 42429@@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42430 phdr.p_offset = offset;
42431 phdr.p_vaddr = vma->vm_start;
42432 phdr.p_paddr = 0;
42433- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42434+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42435 phdr.p_memsz = vma->vm_end - vma->vm_start;
42436 offset += phdr.p_filesz;
42437 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
fe2de317 42438@@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42439 phdr.p_align = ELF_EXEC_PAGESIZE;
42440
42441 size += sizeof(phdr);
42442+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42443 if (size > cprm->limit
42444 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42445 goto end_coredump;
fe2de317 42446@@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42447 unsigned long addr;
42448 unsigned long end;
42449
42450- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42451+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42452
42453 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42454 struct page *page;
fe2de317 42455@@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42456 page = get_dump_page(addr);
42457 if (page) {
42458 void *kaddr = kmap(page);
42459+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42460 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42461 !dump_write(cprm->file, kaddr,
42462 PAGE_SIZE);
fe2de317 42463@@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42464
42465 if (e_phnum == PN_XNUM) {
42466 size += sizeof(*shdr4extnum);
42467+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42468 if (size > cprm->limit
42469 || !dump_write(cprm->file, shdr4extnum,
42470 sizeof(*shdr4extnum)))
6e9df6a3 42471@@ -2075,6 +2388,97 @@ out:
42472
42473 #endif /* CONFIG_ELF_CORE */
42474
42475+#ifdef CONFIG_PAX_MPROTECT
42476+/* PaX: non-PIC ELF libraries need relocations on their executable segments
42477+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42478+ * we'll remove VM_MAYWRITE for good on RELRO segments.
42479+ *
42480+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42481+ * basis because we want to allow the common case and not the special ones.
42482+ */
42483+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42484+{
42485+ struct elfhdr elf_h;
42486+ struct elf_phdr elf_p;
42487+ unsigned long i;
42488+ unsigned long oldflags;
42489+ bool is_textrel_rw, is_textrel_rx, is_relro;
42490+
42491+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42492+ return;
42493+
42494+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42495+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42496+
57199397 42497+#ifdef CONFIG_PAX_ELFRELOCS
42498+ /* possible TEXTREL */
42499+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42500+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42501+#else
42502+ is_textrel_rw = false;
42503+ is_textrel_rx = false;
42504+#endif
42505+
42506+ /* possible RELRO */
42507+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42508+
42509+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42510+ return;
42511+
42512+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42513+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42514+
42515+#ifdef CONFIG_PAX_ETEXECRELOCS
42516+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42517+#else
42518+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42519+#endif
42520+
42521+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42522+ !elf_check_arch(&elf_h) ||
42523+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42524+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42525+ return;
42526+
42527+ for (i = 0UL; i < elf_h.e_phnum; i++) {
42528+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42529+ return;
42530+ switch (elf_p.p_type) {
42531+ case PT_DYNAMIC:
42532+ if (!is_textrel_rw && !is_textrel_rx)
42533+ continue;
42534+ i = 0UL;
42535+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42536+ elf_dyn dyn;
42537+
42538+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42539+ return;
42540+ if (dyn.d_tag == DT_NULL)
42541+ return;
42542+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42543+ gr_log_textrel(vma);
42544+ if (is_textrel_rw)
42545+ vma->vm_flags |= VM_MAYWRITE;
42546+ else
42547+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42548+ vma->vm_flags &= ~VM_MAYWRITE;
42549+ return;
42550+ }
42551+ i++;
42552+ }
42553+ return;
42554+
42555+ case PT_GNU_RELRO:
42556+ if (!is_relro)
42557+ continue;
42558+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42559+ vma->vm_flags &= ~VM_MAYWRITE;
42560+ return;
42561+ }
42562+ }
42563+}
42564+#endif
42565+
42566 static int __init init_elf_binfmt(void)
42567 {
42568 return register_binfmt(&elf_format);
42569diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42570index 1bffbe0..c8c283e 100644
42571--- a/fs/binfmt_flat.c
42572+++ b/fs/binfmt_flat.c
42573@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42574 realdatastart = (unsigned long) -ENOMEM;
42575 printk("Unable to allocate RAM for process data, errno %d\n",
42576 (int)-realdatastart);
42577+ down_write(&current->mm->mmap_sem);
42578 do_munmap(current->mm, textpos, text_len);
42579+ up_write(&current->mm->mmap_sem);
42580 ret = realdatastart;
42581 goto err;
42582 }
fe2de317 42583@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
58c5fc13 42584 }
ae4e228f 42585 if (IS_ERR_VALUE(result)) {
42586 printk("Unable to read data+bss, errno %d\n", (int)-result);
42587+ down_write(&current->mm->mmap_sem);
42588 do_munmap(current->mm, textpos, text_len);
57199397 42589 do_munmap(current->mm, realdatastart, len);
42590+ up_write(&current->mm->mmap_sem);
42591 ret = result;
42592 goto err;
42593 }
fe2de317 42594@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
58c5fc13 42595 }
ae4e228f 42596 if (IS_ERR_VALUE(result)) {
42597 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42598+ down_write(&current->mm->mmap_sem);
42599 do_munmap(current->mm, textpos, text_len + data_len + extra +
42600 MAX_SHARED_LIBS * sizeof(unsigned long));
42601+ up_write(&current->mm->mmap_sem);
42602 ret = result;
42603 goto err;
42604 }
42605diff --git a/fs/bio.c b/fs/bio.c
42606index 9bfade8..782f3b9 100644
42607--- a/fs/bio.c
42608+++ b/fs/bio.c
42609@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42610 const int read = bio_data_dir(bio) == READ;
42611 struct bio_map_data *bmd = bio->bi_private;
42612 int i;
42613- char *p = bmd->sgvecs[0].iov_base;
6e9df6a3 42614+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42615
42616 __bio_for_each_segment(bvec, bio, i, 0) {
42617 char *addr = page_address(bvec->bv_page);
42618diff --git a/fs/block_dev.c b/fs/block_dev.c
42619index 1c44b8d..e2507b4 100644
42620--- a/fs/block_dev.c
42621+++ b/fs/block_dev.c
42622@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
df50ba0c 42623 else if (bdev->bd_contains == bdev)
42624 return true; /* is a whole device which isn't held */
42625
42626- else if (whole->bd_holder == bd_may_claim)
42627+ else if (whole->bd_holder == (void *)bd_may_claim)
42628 return true; /* is a partition of a device that is being partitioned */
42629 else if (whole->bd_holder != NULL)
42630 return false; /* is a partition of a held device */
42631diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42632index 011cab3..9ace713 100644
42633--- a/fs/btrfs/ctree.c
42634+++ b/fs/btrfs/ctree.c
42635@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42636 free_extent_buffer(buf);
42637 add_root_to_dirty_list(root);
42638 } else {
42639- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42640- parent_start = parent->start;
42641- else
42642+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42643+ if (parent)
42644+ parent_start = parent->start;
42645+ else
42646+ parent_start = 0;
42647+ } else
42648 parent_start = 0;
42649
42650 WARN_ON(trans->transid != btrfs_header_generation(parent));
42651diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42652index b2d004a..6bb543d 100644
42653--- a/fs/btrfs/inode.c
42654+++ b/fs/btrfs/inode.c
6e9df6a3 42655@@ -6922,7 +6922,7 @@ fail:
42656 return -ENOMEM;
42657 }
42658
42659-static int btrfs_getattr(struct vfsmount *mnt,
42660+int btrfs_getattr(struct vfsmount *mnt,
42661 struct dentry *dentry, struct kstat *stat)
42662 {
42663 struct inode *inode = dentry->d_inode;
fe2de317 42664@@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42665 return 0;
42666 }
42667
42668+EXPORT_SYMBOL(btrfs_getattr);
42669+
42670+dev_t get_btrfs_dev_from_inode(struct inode *inode)
42671+{
6e9df6a3 42672+ return BTRFS_I(inode)->root->anon_dev;
42673+}
42674+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42675+
42676 /*
42677 * If a file is moved, it will inherit the cow and compression flags of the new
42678 * directory.
42679diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42680index dae5dfe..6aa01b1 100644
42681--- a/fs/btrfs/ioctl.c
42682+++ b/fs/btrfs/ioctl.c
42683@@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42684 for (i = 0; i < num_types; i++) {
42685 struct btrfs_space_info *tmp;
42686
42687+ /* Don't copy in more than we allocated */
42688 if (!slot_count)
42689 break;
42690
42691+ slot_count--;
42692+
42693 info = NULL;
42694 rcu_read_lock();
42695 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
fe2de317 42696@@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42697 memcpy(dest, &space, sizeof(space));
42698 dest++;
42699 space_args.total_spaces++;
42700- slot_count--;
42701 }
42702- if (!slot_count)
42703- break;
42704 }
42705 up_read(&info->groups_sem);
42706 }
42707
42708- user_dest = (struct btrfs_ioctl_space_info *)
42709+ user_dest = (struct btrfs_ioctl_space_info __user *)
42710 (arg + sizeof(struct btrfs_ioctl_space_args));
42711
42712 if (copy_to_user(user_dest, dest_orig, alloc_size))
42713diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42714index 59bb176..be9977d 100644
42715--- a/fs/btrfs/relocation.c
42716+++ b/fs/btrfs/relocation.c
42717@@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42718 }
42719 spin_unlock(&rc->reloc_root_tree.lock);
42720
42721- BUG_ON((struct btrfs_root *)node->data != root);
42722+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
42723
42724 if (!del) {
42725 spin_lock(&rc->reloc_root_tree.lock);
42726diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42727index 622f469..e8d2d55 100644
42728--- a/fs/cachefiles/bind.c
42729+++ b/fs/cachefiles/bind.c
42730@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42731 args);
42732
42733 /* start by checking things over */
42734- ASSERT(cache->fstop_percent >= 0 &&
42735- cache->fstop_percent < cache->fcull_percent &&
42736+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
42737 cache->fcull_percent < cache->frun_percent &&
42738 cache->frun_percent < 100);
42739
42740- ASSERT(cache->bstop_percent >= 0 &&
42741- cache->bstop_percent < cache->bcull_percent &&
42742+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
42743 cache->bcull_percent < cache->brun_percent &&
42744 cache->brun_percent < 100);
42745
42746diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42747index 0a1467b..6a53245 100644
42748--- a/fs/cachefiles/daemon.c
42749+++ b/fs/cachefiles/daemon.c
42750@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42751 if (n > buflen)
42752 return -EMSGSIZE;
42753
42754- if (copy_to_user(_buffer, buffer, n) != 0)
42755+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42756 return -EFAULT;
42757
ae4e228f 42758 return n;
fe2de317 42759@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42760 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42761 return -EIO;
42762
42763- if (datalen < 0 || datalen > PAGE_SIZE - 1)
42764+ if (datalen > PAGE_SIZE - 1)
42765 return -EOPNOTSUPP;
42766
42767 /* drag the command string into the kernel so we can parse it */
fe2de317 42768@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42769 if (args[0] != '%' || args[1] != '\0')
42770 return -EINVAL;
42771
42772- if (fstop < 0 || fstop >= cache->fcull_percent)
42773+ if (fstop >= cache->fcull_percent)
42774 return cachefiles_daemon_range_error(cache, args);
42775
42776 cache->fstop_percent = fstop;
fe2de317 42777@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42778 if (args[0] != '%' || args[1] != '\0')
42779 return -EINVAL;
42780
42781- if (bstop < 0 || bstop >= cache->bcull_percent)
42782+ if (bstop >= cache->bcull_percent)
42783 return cachefiles_daemon_range_error(cache, args);
42784
42785 cache->bstop_percent = bstop;
42786diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42787index bd6bc1b..b627b53 100644
42788--- a/fs/cachefiles/internal.h
42789+++ b/fs/cachefiles/internal.h
42790@@ -57,7 +57,7 @@ struct cachefiles_cache {
42791 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42792 struct rb_root active_nodes; /* active nodes (can't be culled) */
42793 rwlock_t active_lock; /* lock for active_nodes */
42794- atomic_t gravecounter; /* graveyard uniquifier */
42795+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42796 unsigned frun_percent; /* when to stop culling (% files) */
42797 unsigned fcull_percent; /* when to start culling (% files) */
42798 unsigned fstop_percent; /* when to stop allocating (% files) */
fe2de317 42799@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42800 * proc.c
42801 */
42802 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42803-extern atomic_t cachefiles_lookup_histogram[HZ];
42804-extern atomic_t cachefiles_mkdir_histogram[HZ];
42805-extern atomic_t cachefiles_create_histogram[HZ];
42806+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42807+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42808+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42809
42810 extern int __init cachefiles_proc_init(void);
42811 extern void cachefiles_proc_cleanup(void);
42812 static inline
42813-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42814+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42815 {
42816 unsigned long jif = jiffies - start_jif;
42817 if (jif >= HZ)
42818 jif = HZ - 1;
42819- atomic_inc(&histogram[jif]);
42820+ atomic_inc_unchecked(&histogram[jif]);
42821 }
42822
42823 #else
42824diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42825index a0358c2..d6137f2 100644
42826--- a/fs/cachefiles/namei.c
42827+++ b/fs/cachefiles/namei.c
66a7e928 42828@@ -318,7 +318,7 @@ try_again:
42829 /* first step is to make up a grave dentry in the graveyard */
42830 sprintf(nbuffer, "%08x%08x",
42831 (uint32_t) get_seconds(),
42832- (uint32_t) atomic_inc_return(&cache->gravecounter));
42833+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42834
42835 /* do the multiway lock magic */
42836 trap = lock_rename(cache->graveyard, dir);
42837diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42838index eccd339..4c1d995 100644
42839--- a/fs/cachefiles/proc.c
42840+++ b/fs/cachefiles/proc.c
42841@@ -14,9 +14,9 @@
42842 #include <linux/seq_file.h>
42843 #include "internal.h"
42844
42845-atomic_t cachefiles_lookup_histogram[HZ];
42846-atomic_t cachefiles_mkdir_histogram[HZ];
42847-atomic_t cachefiles_create_histogram[HZ];
42848+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42849+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42850+atomic_unchecked_t cachefiles_create_histogram[HZ];
42851
42852 /*
42853 * display the latency histogram
fe2de317 42854@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42855 return 0;
42856 default:
42857 index = (unsigned long) v - 3;
42858- x = atomic_read(&cachefiles_lookup_histogram[index]);
42859- y = atomic_read(&cachefiles_mkdir_histogram[index]);
42860- z = atomic_read(&cachefiles_create_histogram[index]);
42861+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42862+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42863+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42864 if (x == 0 && y == 0 && z == 0)
42865 return 0;
42866
42867diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42868index 0e3c092..818480e 100644
42869--- a/fs/cachefiles/rdwr.c
42870+++ b/fs/cachefiles/rdwr.c
42871@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42872 old_fs = get_fs();
42873 set_fs(KERNEL_DS);
42874 ret = file->f_op->write(
42875- file, (const void __user *) data, len, &pos);
6e9df6a3 42876+ file, (const void __force_user *) data, len, &pos);
42877 set_fs(old_fs);
42878 kunmap(page);
42879 if (ret != len)
42880diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42881index 382abc9..bd89646 100644
42882--- a/fs/ceph/dir.c
42883+++ b/fs/ceph/dir.c
42884@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42885 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42886 struct ceph_mds_client *mdsc = fsc->mdsc;
42887 unsigned frag = fpos_frag(filp->f_pos);
42888- int off = fpos_off(filp->f_pos);
42889+ unsigned int off = fpos_off(filp->f_pos);
42890 int err;
42891 u32 ftype;
42892 struct ceph_mds_reply_info_parsed *rinfo;
42893diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42894index 6d40656..bc1f825 100644
42895--- a/fs/cifs/cifs_debug.c
42896+++ b/fs/cifs/cifs_debug.c
42897@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42898
42899 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42900 #ifdef CONFIG_CIFS_STATS2
42901- atomic_set(&totBufAllocCount, 0);
42902- atomic_set(&totSmBufAllocCount, 0);
42903+ atomic_set_unchecked(&totBufAllocCount, 0);
42904+ atomic_set_unchecked(&totSmBufAllocCount, 0);
42905 #endif /* CONFIG_CIFS_STATS2 */
42906 spin_lock(&cifs_tcp_ses_lock);
42907 list_for_each(tmp1, &cifs_tcp_ses_list) {
fe2de317 42908@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
8308f9c9 42909 tcon = list_entry(tmp3,
15a11c5b 42910 struct cifs_tcon,
42911 tcon_list);
42912- atomic_set(&tcon->num_smbs_sent, 0);
42913- atomic_set(&tcon->num_writes, 0);
42914- atomic_set(&tcon->num_reads, 0);
42915- atomic_set(&tcon->num_oplock_brks, 0);
42916- atomic_set(&tcon->num_opens, 0);
42917- atomic_set(&tcon->num_posixopens, 0);
42918- atomic_set(&tcon->num_posixmkdirs, 0);
42919- atomic_set(&tcon->num_closes, 0);
42920- atomic_set(&tcon->num_deletes, 0);
42921- atomic_set(&tcon->num_mkdirs, 0);
42922- atomic_set(&tcon->num_rmdirs, 0);
42923- atomic_set(&tcon->num_renames, 0);
42924- atomic_set(&tcon->num_t2renames, 0);
42925- atomic_set(&tcon->num_ffirst, 0);
42926- atomic_set(&tcon->num_fnext, 0);
42927- atomic_set(&tcon->num_fclose, 0);
42928- atomic_set(&tcon->num_hardlinks, 0);
42929- atomic_set(&tcon->num_symlinks, 0);
42930- atomic_set(&tcon->num_locks, 0);
42931+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42932+ atomic_set_unchecked(&tcon->num_writes, 0);
42933+ atomic_set_unchecked(&tcon->num_reads, 0);
42934+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42935+ atomic_set_unchecked(&tcon->num_opens, 0);
42936+ atomic_set_unchecked(&tcon->num_posixopens, 0);
42937+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42938+ atomic_set_unchecked(&tcon->num_closes, 0);
42939+ atomic_set_unchecked(&tcon->num_deletes, 0);
42940+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
42941+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
42942+ atomic_set_unchecked(&tcon->num_renames, 0);
42943+ atomic_set_unchecked(&tcon->num_t2renames, 0);
42944+ atomic_set_unchecked(&tcon->num_ffirst, 0);
42945+ atomic_set_unchecked(&tcon->num_fnext, 0);
42946+ atomic_set_unchecked(&tcon->num_fclose, 0);
42947+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
42948+ atomic_set_unchecked(&tcon->num_symlinks, 0);
42949+ atomic_set_unchecked(&tcon->num_locks, 0);
42950 }
42951 }
42952 }
fe2de317 42953@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42954 smBufAllocCount.counter, cifs_min_small);
42955 #ifdef CONFIG_CIFS_STATS2
42956 seq_printf(m, "Total Large %d Small %d Allocations\n",
42957- atomic_read(&totBufAllocCount),
42958- atomic_read(&totSmBufAllocCount));
42959+ atomic_read_unchecked(&totBufAllocCount),
42960+ atomic_read_unchecked(&totSmBufAllocCount));
42961 #endif /* CONFIG_CIFS_STATS2 */
42962
42963 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
fe2de317 42964@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42965 if (tcon->need_reconnect)
42966 seq_puts(m, "\tDISCONNECTED ");
42967 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42968- atomic_read(&tcon->num_smbs_sent),
42969- atomic_read(&tcon->num_oplock_brks));
42970+ atomic_read_unchecked(&tcon->num_smbs_sent),
42971+ atomic_read_unchecked(&tcon->num_oplock_brks));
42972 seq_printf(m, "\nReads: %d Bytes: %lld",
42973- atomic_read(&tcon->num_reads),
42974+ atomic_read_unchecked(&tcon->num_reads),
42975 (long long)(tcon->bytes_read));
42976 seq_printf(m, "\nWrites: %d Bytes: %lld",
42977- atomic_read(&tcon->num_writes),
42978+ atomic_read_unchecked(&tcon->num_writes),
42979 (long long)(tcon->bytes_written));
42980 seq_printf(m, "\nFlushes: %d",
42981- atomic_read(&tcon->num_flushes));
42982+ atomic_read_unchecked(&tcon->num_flushes));
42983 seq_printf(m, "\nLocks: %d HardLinks: %d "
42984 "Symlinks: %d",
42985- atomic_read(&tcon->num_locks),
42986- atomic_read(&tcon->num_hardlinks),
42987- atomic_read(&tcon->num_symlinks));
42988+ atomic_read_unchecked(&tcon->num_locks),
42989+ atomic_read_unchecked(&tcon->num_hardlinks),
42990+ atomic_read_unchecked(&tcon->num_symlinks));
42991 seq_printf(m, "\nOpens: %d Closes: %d "
42992 "Deletes: %d",
42993- atomic_read(&tcon->num_opens),
42994- atomic_read(&tcon->num_closes),
42995- atomic_read(&tcon->num_deletes));
42996+ atomic_read_unchecked(&tcon->num_opens),
42997+ atomic_read_unchecked(&tcon->num_closes),
42998+ atomic_read_unchecked(&tcon->num_deletes));
42999 seq_printf(m, "\nPosix Opens: %d "
43000 "Posix Mkdirs: %d",
43001- atomic_read(&tcon->num_posixopens),
43002- atomic_read(&tcon->num_posixmkdirs));
43003+ atomic_read_unchecked(&tcon->num_posixopens),
43004+ atomic_read_unchecked(&tcon->num_posixmkdirs));
43005 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43006- atomic_read(&tcon->num_mkdirs),
43007- atomic_read(&tcon->num_rmdirs));
43008+ atomic_read_unchecked(&tcon->num_mkdirs),
43009+ atomic_read_unchecked(&tcon->num_rmdirs));
43010 seq_printf(m, "\nRenames: %d T2 Renames %d",
43011- atomic_read(&tcon->num_renames),
43012- atomic_read(&tcon->num_t2renames));
43013+ atomic_read_unchecked(&tcon->num_renames),
43014+ atomic_read_unchecked(&tcon->num_t2renames));
43015 seq_printf(m, "\nFindFirst: %d FNext %d "
43016 "FClose %d",
43017- atomic_read(&tcon->num_ffirst),
43018- atomic_read(&tcon->num_fnext),
43019- atomic_read(&tcon->num_fclose));
43020+ atomic_read_unchecked(&tcon->num_ffirst),
43021+ atomic_read_unchecked(&tcon->num_fnext),
43022+ atomic_read_unchecked(&tcon->num_fclose));
43023 }
43024 }
43025 }
43026diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43027index 54b8f1e..f6a4c00 100644
43028--- a/fs/cifs/cifsfs.c
43029+++ b/fs/cifs/cifsfs.c
6e9df6a3 43030@@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
43031 cifs_req_cachep = kmem_cache_create("cifs_request",
43032 CIFSMaxBufSize +
43033 MAX_CIFS_HDR_SIZE, 0,
43034- SLAB_HWCACHE_ALIGN, NULL);
43035+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43036 if (cifs_req_cachep == NULL)
43037 return -ENOMEM;
43038
6e9df6a3 43039@@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
43040 efficient to alloc 1 per page off the slab compared to 17K (5page)
43041 alloc of large cifs buffers even when page debugging is on */
43042 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43043- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43044+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43045 NULL);
43046 if (cifs_sm_req_cachep == NULL) {
43047 mempool_destroy(cifs_req_poolp);
6e9df6a3 43048@@ -1093,8 +1093,8 @@ init_cifs(void)
43049 atomic_set(&bufAllocCount, 0);
43050 atomic_set(&smBufAllocCount, 0);
43051 #ifdef CONFIG_CIFS_STATS2
43052- atomic_set(&totBufAllocCount, 0);
43053- atomic_set(&totSmBufAllocCount, 0);
43054+ atomic_set_unchecked(&totBufAllocCount, 0);
43055+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43056 #endif /* CONFIG_CIFS_STATS2 */
43057
43058 atomic_set(&midCount, 0);
43059diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43060index 95dad9d..fe7af1a 100644
43061--- a/fs/cifs/cifsglob.h
43062+++ b/fs/cifs/cifsglob.h
15a11c5b 43063@@ -381,28 +381,28 @@ struct cifs_tcon {
43064 __u16 Flags; /* optional support bits */
43065 enum statusEnum tidStatus;
43066 #ifdef CONFIG_CIFS_STATS
43067- atomic_t num_smbs_sent;
43068- atomic_t num_writes;
43069- atomic_t num_reads;
43070- atomic_t num_flushes;
43071- atomic_t num_oplock_brks;
43072- atomic_t num_opens;
43073- atomic_t num_closes;
43074- atomic_t num_deletes;
43075- atomic_t num_mkdirs;
43076- atomic_t num_posixopens;
43077- atomic_t num_posixmkdirs;
43078- atomic_t num_rmdirs;
43079- atomic_t num_renames;
43080- atomic_t num_t2renames;
43081- atomic_t num_ffirst;
43082- atomic_t num_fnext;
43083- atomic_t num_fclose;
43084- atomic_t num_hardlinks;
43085- atomic_t num_symlinks;
43086- atomic_t num_locks;
43087- atomic_t num_acl_get;
43088- atomic_t num_acl_set;
43089+ atomic_unchecked_t num_smbs_sent;
43090+ atomic_unchecked_t num_writes;
43091+ atomic_unchecked_t num_reads;
43092+ atomic_unchecked_t num_flushes;
43093+ atomic_unchecked_t num_oplock_brks;
43094+ atomic_unchecked_t num_opens;
43095+ atomic_unchecked_t num_closes;
43096+ atomic_unchecked_t num_deletes;
43097+ atomic_unchecked_t num_mkdirs;
43098+ atomic_unchecked_t num_posixopens;
43099+ atomic_unchecked_t num_posixmkdirs;
43100+ atomic_unchecked_t num_rmdirs;
43101+ atomic_unchecked_t num_renames;
43102+ atomic_unchecked_t num_t2renames;
43103+ atomic_unchecked_t num_ffirst;
43104+ atomic_unchecked_t num_fnext;
43105+ atomic_unchecked_t num_fclose;
43106+ atomic_unchecked_t num_hardlinks;
43107+ atomic_unchecked_t num_symlinks;
43108+ atomic_unchecked_t num_locks;
43109+ atomic_unchecked_t num_acl_get;
43110+ atomic_unchecked_t num_acl_set;
43111 #ifdef CONFIG_CIFS_STATS2
43112 unsigned long long time_writes;
43113 unsigned long long time_reads;
fe2de317 43114@@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim)
43115 }
43116
43117 #ifdef CONFIG_CIFS_STATS
43118-#define cifs_stats_inc atomic_inc
43119+#define cifs_stats_inc atomic_inc_unchecked
43120
15a11c5b 43121 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
8308f9c9 43122 unsigned int bytes)
fe2de317 43123@@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43124 /* Various Debug counters */
43125 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43126 #ifdef CONFIG_CIFS_STATS2
43127-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43128-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43129+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43130+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43131 #endif
43132 GLOBAL_EXTERN atomic_t smBufAllocCount;
43133 GLOBAL_EXTERN atomic_t midCount;
43134diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43135index db3f18c..1f5955e 100644
43136--- a/fs/cifs/link.c
43137+++ b/fs/cifs/link.c
6e9df6a3 43138@@ -593,7 +593,7 @@ symlink_exit:
43139
43140 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43141 {
43142- char *p = nd_get_link(nd);
43143+ const char *p = nd_get_link(nd);
43144 if (!IS_ERR(p))
43145 kfree(p);
43146 }
43147diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43148index 7c16933..c8212b5 100644
43149--- a/fs/cifs/misc.c
43150+++ b/fs/cifs/misc.c
43151@@ -156,7 +156,7 @@ cifs_buf_get(void)
43152 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43153 atomic_inc(&bufAllocCount);
43154 #ifdef CONFIG_CIFS_STATS2
43155- atomic_inc(&totBufAllocCount);
43156+ atomic_inc_unchecked(&totBufAllocCount);
43157 #endif /* CONFIG_CIFS_STATS2 */
43158 }
43159
43160@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43161 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43162 atomic_inc(&smBufAllocCount);
43163 #ifdef CONFIG_CIFS_STATS2
43164- atomic_inc(&totSmBufAllocCount);
43165+ atomic_inc_unchecked(&totSmBufAllocCount);
43166 #endif /* CONFIG_CIFS_STATS2 */
43167
43168 }
43169diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43170index 6901578..d402eb5 100644
43171--- a/fs/coda/cache.c
43172+++ b/fs/coda/cache.c
43173@@ -24,7 +24,7 @@
43174 #include "coda_linux.h"
43175 #include "coda_cache.h"
43176
43177-static atomic_t permission_epoch = ATOMIC_INIT(0);
43178+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43179
43180 /* replace or extend an acl cache hit */
43181 void coda_cache_enter(struct inode *inode, int mask)
fe2de317 43182@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43183 struct coda_inode_info *cii = ITOC(inode);
43184
43185 spin_lock(&cii->c_lock);
43186- cii->c_cached_epoch = atomic_read(&permission_epoch);
43187+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43188 if (cii->c_uid != current_fsuid()) {
43189 cii->c_uid = current_fsuid();
43190 cii->c_cached_perm = mask;
fe2de317 43191@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43192 {
43193 struct coda_inode_info *cii = ITOC(inode);
43194 spin_lock(&cii->c_lock);
43195- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43196+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43197 spin_unlock(&cii->c_lock);
43198 }
43199
43200 /* remove all acl caches */
43201 void coda_cache_clear_all(struct super_block *sb)
43202 {
43203- atomic_inc(&permission_epoch);
43204+ atomic_inc_unchecked(&permission_epoch);
43205 }
43206
43207
fe2de317 43208@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43209 spin_lock(&cii->c_lock);
43210 hit = (mask & cii->c_cached_perm) == mask &&
43211 cii->c_uid == current_fsuid() &&
43212- cii->c_cached_epoch == atomic_read(&permission_epoch);
43213+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43214 spin_unlock(&cii->c_lock);
43215
43216 return hit;
43217diff --git a/fs/compat.c b/fs/compat.c
43218index 58b1da4..afcd9b8 100644
43219--- a/fs/compat.c
43220+++ b/fs/compat.c
43221@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
43222 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
43223 {
43224 compat_ino_t ino = stat->ino;
43225- typeof(ubuf->st_uid) uid = 0;
43226- typeof(ubuf->st_gid) gid = 0;
43227+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
43228+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
43229 int err;
43230
43231 SET_UID(uid, stat->uid);
fe2de317 43232@@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43233
43234 set_fs(KERNEL_DS);
43235 /* The __user pointer cast is valid because of the set_fs() */
43236- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43237+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43238 set_fs(oldfs);
43239 /* truncating is ok because it's a user address */
43240 if (!ret)
fe2de317 43241@@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43242 goto out;
43243
43244 ret = -EINVAL;
43245- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43246+ if (nr_segs > UIO_MAXIOV)
43247 goto out;
43248 if (nr_segs > fast_segs) {
43249 ret = -ENOMEM;
66a7e928 43250@@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
43251
43252 struct compat_readdir_callback {
43253 struct compat_old_linux_dirent __user *dirent;
43254+ struct file * file;
43255 int result;
43256 };
43257
fe2de317 43258@@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43259 buf->result = -EOVERFLOW;
43260 return -EOVERFLOW;
43261 }
43262+
43263+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43264+ return 0;
43265+
43266 buf->result++;
43267 dirent = buf->dirent;
43268 if (!access_ok(VERIFY_WRITE, dirent,
fe2de317 43269@@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43270
43271 buf.result = 0;
43272 buf.dirent = dirent;
43273+ buf.file = file;
43274
43275 error = vfs_readdir(file, compat_fillonedir, &buf);
43276 if (buf.result)
66a7e928 43277@@ -917,6 +923,7 @@ struct compat_linux_dirent {
43278 struct compat_getdents_callback {
43279 struct compat_linux_dirent __user *current_dir;
43280 struct compat_linux_dirent __user *previous;
43281+ struct file * file;
43282 int count;
43283 int error;
43284 };
fe2de317 43285@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43286 buf->error = -EOVERFLOW;
43287 return -EOVERFLOW;
43288 }
43289+
43290+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43291+ return 0;
43292+
43293 dirent = buf->previous;
43294 if (dirent) {
43295 if (__put_user(offset, &dirent->d_off))
fe2de317 43296@@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43297 buf.previous = NULL;
43298 buf.count = count;
43299 buf.error = 0;
43300+ buf.file = file;
43301
43302 error = vfs_readdir(file, compat_filldir, &buf);
43303 if (error >= 0)
66a7e928 43304@@ -1006,6 +1018,7 @@ out:
43305 struct compat_getdents_callback64 {
43306 struct linux_dirent64 __user *current_dir;
43307 struct linux_dirent64 __user *previous;
43308+ struct file * file;
43309 int count;
43310 int error;
43311 };
fe2de317 43312@@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43313 buf->error = -EINVAL; /* only used if we fail.. */
43314 if (reclen > buf->count)
43315 return -EINVAL;
43316+
43317+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43318+ return 0;
43319+
43320 dirent = buf->previous;
43321
43322 if (dirent) {
fe2de317 43323@@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43324 buf.previous = NULL;
43325 buf.count = count;
43326 buf.error = 0;
43327+ buf.file = file;
43328
43329 error = vfs_readdir(file, compat_filldir64, &buf);
43330 if (error >= 0)
43331 error = buf.error;
43332 lastdirent = buf.previous;
43333 if (lastdirent) {
43334- typeof(lastdirent->d_off) d_off = file->f_pos;
43335+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43336 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43337 error = -EFAULT;
43338 else
fe2de317 43339@@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
43340 struct fdtable *fdt;
43341 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43342
43343+ pax_track_stack();
43344+
43345 if (n < 0)
43346 goto out_nofds;
43347
43348diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43349index 112e45a..b59845b 100644
43350--- a/fs/compat_binfmt_elf.c
43351+++ b/fs/compat_binfmt_elf.c
43352@@ -30,11 +30,13 @@
43353 #undef elf_phdr
43354 #undef elf_shdr
43355 #undef elf_note
43356+#undef elf_dyn
43357 #undef elf_addr_t
43358 #define elfhdr elf32_hdr
43359 #define elf_phdr elf32_phdr
43360 #define elf_shdr elf32_shdr
43361 #define elf_note elf32_note
43362+#define elf_dyn Elf32_Dyn
43363 #define elf_addr_t Elf32_Addr
43364
43365 /*
43366diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43367index 51352de..93292ff 100644
43368--- a/fs/compat_ioctl.c
43369+++ b/fs/compat_ioctl.c
43370@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43371
43372 err = get_user(palp, &up->palette);
43373 err |= get_user(length, &up->length);
43374+ if (err)
43375+ return -EFAULT;
43376
43377 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43378 err = put_user(compat_ptr(palp), &up_native->palette);
fe2de317 43379@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43380 return -EFAULT;
43381 if (__get_user(udata, &ss32->iomem_base))
43382 return -EFAULT;
43383- ss.iomem_base = compat_ptr(udata);
43384+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43385 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43386 __get_user(ss.port_high, &ss32->port_high))
43387 return -EFAULT;
fe2de317 43388@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43389 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43390 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43391 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43392- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43393+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43394 return -EFAULT;
43395
43396 return ioctl_preallocate(file, p);
fe2de317 43397@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43398 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43399 {
43400 unsigned int a, b;
43401- a = *(unsigned int *)p;
43402- b = *(unsigned int *)q;
43403+ a = *(const unsigned int *)p;
43404+ b = *(const unsigned int *)q;
43405 if (a > b)
43406 return 1;
43407 if (a < b)
43408diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43409index 9a37a9b..35792b6 100644
43410--- a/fs/configfs/dir.c
43411+++ b/fs/configfs/dir.c
43412@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43413 }
43414 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43415 struct configfs_dirent *next;
43416- const char * name;
43417+ const unsigned char * name;
43418+ char d_name[sizeof(next->s_dentry->d_iname)];
43419 int len;
43420 struct inode *inode = NULL;
43421
fe2de317 43422@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43423 continue;
43424
43425 name = configfs_get_name(next);
43426- len = strlen(name);
43427+ if (next->s_dentry && name == next->s_dentry->d_iname) {
43428+ len = next->s_dentry->d_name.len;
43429+ memcpy(d_name, name, len);
43430+ name = d_name;
43431+ } else
43432+ len = strlen(name);
43433
43434 /*
43435 * We'll have a dentry and an inode for
43436diff --git a/fs/dcache.c b/fs/dcache.c
43437index a88948b..1e32160 100644
43438--- a/fs/dcache.c
43439+++ b/fs/dcache.c
43440@@ -2998,7 +2998,7 @@ void __init vfs_caches_init(unsigned long mempages)
43441 mempages -= reserve;
43442
43443 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43444- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43445+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43446
43447 dcache_init();
43448 inode_init();
43449diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43450index 528da01..bd8c23d 100644
43451--- a/fs/ecryptfs/inode.c
43452+++ b/fs/ecryptfs/inode.c
43453@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43454 old_fs = get_fs();
43455 set_fs(get_ds());
43456 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43457- (char __user *)lower_buf,
6e9df6a3 43458+ (char __force_user *)lower_buf,
43459 lower_bufsiz);
43460 set_fs(old_fs);
df50ba0c 43461 if (rc < 0)
fe2de317 43462@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43463 }
43464 old_fs = get_fs();
43465 set_fs(get_ds());
43466- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
6e9df6a3 43467+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43468 set_fs(old_fs);
43469 if (rc < 0) {
43470 kfree(buf);
fe2de317 43471@@ -752,7 +752,7 @@ out:
43472 static void
43473 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43474 {
43475- char *buf = nd_get_link(nd);
43476+ const char *buf = nd_get_link(nd);
43477 if (!IS_ERR(buf)) {
43478 /* Free the char* */
43479 kfree(buf);
43480diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43481index 940a82e..63af89e 100644
43482--- a/fs/ecryptfs/miscdev.c
43483+++ b/fs/ecryptfs/miscdev.c
df50ba0c 43484@@ -328,7 +328,7 @@ check_list:
43485 goto out_unlock_msg_ctx;
43486 i = 5;
43487 if (msg_ctx->msg) {
43488- if (copy_to_user(&buf[i], packet_length, packet_length_size))
43489+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43490 goto out_unlock_msg_ctx;
43491 i += packet_length_size;
43492 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43493diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43494index 3745f7c..89cc7a3 100644
43495--- a/fs/ecryptfs/read_write.c
43496+++ b/fs/ecryptfs/read_write.c
43497@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43498 return -EIO;
43499 fs_save = get_fs();
43500 set_fs(get_ds());
43501- rc = vfs_write(lower_file, data, size, &offset);
43502+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43503 set_fs(fs_save);
43504 mark_inode_dirty_sync(ecryptfs_inode);
43505 return rc;
fe2de317 43506@@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43507 return -EIO;
43508 fs_save = get_fs();
43509 set_fs(get_ds());
43510- rc = vfs_read(lower_file, data, size, &offset);
43511+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43512 set_fs(fs_save);
43513 return rc;
43514 }
43515diff --git a/fs/exec.c b/fs/exec.c
43516index 25dcbe5..4ffaa78 100644
43517--- a/fs/exec.c
43518+++ b/fs/exec.c
bc901d79 43519@@ -55,12 +55,24 @@
ae4e228f 43520 #include <linux/pipe_fs_i.h>
bc901d79 43521 #include <linux/oom.h>
15a11c5b 43522 #include <linux/compat.h>
43523+#include <linux/random.h>
43524+#include <linux/seq_file.h>
43525+
43526+#ifdef CONFIG_PAX_REFCOUNT
43527+#include <linux/kallsyms.h>
43528+#include <linux/kdebug.h>
43529+#endif
43530
43531 #include <asm/uaccess.h>
43532 #include <asm/mmu_context.h>
43533 #include <asm/tlb.h>
43534 #include "internal.h"
43535
43536+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43537+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43538+EXPORT_SYMBOL(pax_set_initial_flags_func);
43539+#endif
43540+
43541 int core_uses_pid;
43542 char core_pattern[CORENAME_MAX_SIZE] = "core";
ae4e228f 43543 unsigned int core_pipe_limit;
43544@@ -70,7 +82,7 @@ struct core_name {
43545 char *corename;
43546 int used, size;
43547 };
43548-static atomic_t call_count = ATOMIC_INIT(1);
43549+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43550
43551 /* The maximal length of core_pattern is also specified in sysctl.c */
43552
fe2de317 43553@@ -188,18 +200,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43554 int write)
43555 {
43556 struct page *page;
43557- int ret;
43558
43559-#ifdef CONFIG_STACK_GROWSUP
43560- if (write) {
15a11c5b 43561- ret = expand_downwards(bprm->vma, pos);
43562- if (ret < 0)
43563- return NULL;
43564- }
43565-#endif
43566- ret = get_user_pages(current, bprm->mm, pos,
43567- 1, write, 1, &page, NULL);
43568- if (ret <= 0)
15a11c5b 43569+ if (0 > expand_downwards(bprm->vma, pos))
43570+ return NULL;
43571+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43572 return NULL;
43573
43574 if (write) {
fe2de317 43575@@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43576 vma->vm_end = STACK_TOP_MAX;
43577 vma->vm_start = vma->vm_end - PAGE_SIZE;
57199397 43578 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43579+
43580+#ifdef CONFIG_PAX_SEGMEXEC
43581+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43582+#endif
43583+
43584 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
df50ba0c 43585 INIT_LIST_HEAD(&vma->anon_vma_chain);
bc901d79 43586
fe2de317 43587@@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43588 mm->stack_vm = mm->total_vm = 1;
43589 up_write(&mm->mmap_sem);
43590 bprm->p = vma->vm_end - sizeof(void *);
43591+
43592+#ifdef CONFIG_PAX_RANDUSTACK
43593+ if (randomize_va_space)
43594+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
43595+#endif
43596+
43597 return 0;
43598 err:
43599 up_write(&mm->mmap_sem);
6e9df6a3 43600@@ -396,19 +411,7 @@ err:
43601 return err;
43602 }
43603
43604-struct user_arg_ptr {
43605-#ifdef CONFIG_COMPAT
43606- bool is_compat;
43607-#endif
43608- union {
43609- const char __user *const __user *native;
43610-#ifdef CONFIG_COMPAT
43611- compat_uptr_t __user *compat;
43612-#endif
43613- } ptr;
43614-};
43615-
43616-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43617+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43618 {
43619 const char __user *native;
43620
fe2de317 43621@@ -417,14 +420,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43622 compat_uptr_t compat;
43623
43624 if (get_user(compat, argv.ptr.compat + nr))
43625- return ERR_PTR(-EFAULT);
43626+ return (const char __force_user *)ERR_PTR(-EFAULT);
43627
43628 return compat_ptr(compat);
43629 }
43630 #endif
43631
43632 if (get_user(native, argv.ptr.native + nr))
43633- return ERR_PTR(-EFAULT);
43634+ return (const char __force_user *)ERR_PTR(-EFAULT);
43635
43636 return native;
43637 }
fe2de317 43638@@ -443,7 +446,7 @@ static int count(struct user_arg_ptr argv, int max)
43639 if (!p)
43640 break;
43641
43642- if (IS_ERR(p))
43643+ if (IS_ERR((const char __force_kernel *)p))
43644 return -EFAULT;
43645
43646 if (i++ >= max)
fe2de317 43647@@ -477,7 +480,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43648
43649 ret = -EFAULT;
43650 str = get_user_arg_ptr(argv, argc);
43651- if (IS_ERR(str))
43652+ if (IS_ERR((const char __force_kernel *)str))
43653 goto out;
43654
43655 len = strnlen_user(str, MAX_ARG_STRLEN);
fe2de317 43656@@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43657 int r;
43658 mm_segment_t oldfs = get_fs();
43659 struct user_arg_ptr argv = {
43660- .ptr.native = (const char __user *const __user *)__argv,
6e9df6a3 43661+ .ptr.native = (const char __force_user *const __force_user *)__argv,
43662 };
43663
ae4e228f 43664 set_fs(KERNEL_DS);
fe2de317 43665@@ -594,7 +597,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
58c5fc13 43666 unsigned long new_end = old_end - shift;
15a11c5b 43667 struct mmu_gather tlb;
43668
43669- BUG_ON(new_start > new_end);
43670+ if (new_start >= new_end || new_start < mmap_min_addr)
bc901d79 43671+ return -ENOMEM;
43672
43673 /*
43674 * ensure there are no vmas between where we want to go
fe2de317 43675@@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43676 if (vma != find_vma(mm, new_start))
43677 return -EFAULT;
43678
43679+#ifdef CONFIG_PAX_SEGMEXEC
43680+ BUG_ON(pax_find_mirror_vma(vma));
43681+#endif
43682+
43683 /*
43684 * cover the whole range: [new_start, old_end)
43685 */
fe2de317 43686@@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43687 stack_top = arch_align_stack(stack_top);
43688 stack_top = PAGE_ALIGN(stack_top);
43689
43690- if (unlikely(stack_top < mmap_min_addr) ||
43691- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43692- return -ENOMEM;
43693-
43694 stack_shift = vma->vm_end - stack_top;
43695
43696 bprm->p -= stack_shift;
fe2de317 43697@@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43698 bprm->exec -= stack_shift;
43699
43700 down_write(&mm->mmap_sem);
43701+
43702+ /* Move stack pages down in memory. */
43703+ if (stack_shift) {
43704+ ret = shift_arg_pages(vma, stack_shift);
43705+ if (ret)
43706+ goto out_unlock;
43707+ }
43708+
43709 vm_flags = VM_STACK_FLAGS;
43710
43711+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43712+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43713+ vm_flags &= ~VM_EXEC;
43714+
43715+#ifdef CONFIG_PAX_MPROTECT
43716+ if (mm->pax_flags & MF_PAX_MPROTECT)
43717+ vm_flags &= ~VM_MAYEXEC;
43718+#endif
43719+
43720+ }
43721+#endif
43722+
43723 /*
43724 * Adjust stack execute permissions; explicitly enable for
43725 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
fe2de317 43726@@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43727 goto out_unlock;
43728 BUG_ON(prev != vma);
43729
43730- /* Move stack pages down in memory. */
43731- if (stack_shift) {
43732- ret = shift_arg_pages(vma, stack_shift);
43733- if (ret)
43734- goto out_unlock;
43735- }
43736-
43737 /* mprotect_fixup is overkill to remove the temporary stack flags */
43738 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43739
fe2de317 43740@@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_t offset,
43741 old_fs = get_fs();
43742 set_fs(get_ds());
43743 /* The cast to a user pointer is valid due to the set_fs() */
43744- result = vfs_read(file, (void __user *)addr, count, &pos);
6e9df6a3 43745+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
43746 set_fs(old_fs);
43747 return result;
43748 }
fe2de317 43749@@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
43750 }
43751 rcu_read_unlock();
43752
43753- if (p->fs->users > n_fs) {
43754+ if (atomic_read(&p->fs->users) > n_fs) {
43755 bprm->unsafe |= LSM_UNSAFE_SHARE;
43756 } else {
43757 res = -EAGAIN;
fe2de317 43758@@ -1454,6 +1471,11 @@ static int do_execve_common(const char *filename,
43759 struct user_arg_ptr envp,
43760 struct pt_regs *regs)
43761 {
43762+#ifdef CONFIG_GRKERNSEC
43763+ struct file *old_exec_file;
43764+ struct acl_subject_label *old_acl;
43765+ struct rlimit old_rlim[RLIM_NLIMITS];
43766+#endif
43767 struct linux_binprm *bprm;
43768 struct file *file;
43769 struct files_struct *displaced;
fe2de317 43770@@ -1461,6 +1483,8 @@ static int do_execve_common(const char *filename,
15a11c5b 43771 int retval;
43772 const struct cred *cred = current_cred();
43773
43774+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43775+
43776 /*
43777 * We move the actual failure in case of RLIMIT_NPROC excess from
43778 * set*uid() to execve() because too many poorly written programs
fe2de317 43779@@ -1507,6 +1531,16 @@ static int do_execve_common(const char *filename,
43780 bprm->filename = filename;
43781 bprm->interp = filename;
43782
43783+ if (gr_process_user_ban()) {
43784+ retval = -EPERM;
43785+ goto out_file;
43786+ }
43787+
43788+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43789+ retval = -EACCES;
43790+ goto out_file;
43791+ }
43792+
43793 retval = bprm_mm_init(bprm);
43794 if (retval)
43795 goto out_file;
fe2de317 43796@@ -1536,9 +1570,40 @@ static int do_execve_common(const char *filename,
43797 if (retval < 0)
43798 goto out;
43799
43800+ if (!gr_tpe_allow(file)) {
43801+ retval = -EACCES;
43802+ goto out;
43803+ }
43804+
43805+ if (gr_check_crash_exec(file)) {
43806+ retval = -EACCES;
43807+ goto out;
43808+ }
43809+
43810+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43811+
43812+ gr_handle_exec_args(bprm, argv);
43813+
43814+#ifdef CONFIG_GRKERNSEC
43815+ old_acl = current->acl;
43816+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43817+ old_exec_file = current->exec_file;
43818+ get_file(file);
43819+ current->exec_file = file;
43820+#endif
43821+
43822+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
bc901d79 43823+ bprm->unsafe & LSM_UNSAFE_SHARE);
43824+ if (retval < 0)
43825+ goto out_fail;
43826+
43827 retval = search_binary_handler(bprm,regs);
43828 if (retval < 0)
43829- goto out;
43830+ goto out_fail;
43831+#ifdef CONFIG_GRKERNSEC
43832+ if (old_exec_file)
43833+ fput(old_exec_file);
43834+#endif
43835
43836 /* execve succeeded */
43837 current->fs->in_exec = 0;
fe2de317 43838@@ -1549,6 +1614,14 @@ static int do_execve_common(const char *filename,
43839 put_files_struct(displaced);
43840 return retval;
43841
43842+out_fail:
43843+#ifdef CONFIG_GRKERNSEC
43844+ current->acl = old_acl;
43845+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43846+ fput(current->exec_file);
43847+ current->exec_file = old_exec_file;
43848+#endif
43849+
43850 out:
43851 if (bprm->mm) {
43852 acct_arg_size(bprm, 0);
fe2de317 43853@@ -1622,7 +1695,7 @@ static int expand_corename(struct core_name *cn)
43854 {
43855 char *old_corename = cn->corename;
43856
43857- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43858+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43859 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43860
43861 if (!cn->corename) {
fe2de317 43862@@ -1719,7 +1792,7 @@ static int format_corename(struct core_name *cn, long signr)
43863 int pid_in_pattern = 0;
43864 int err = 0;
43865
43866- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43867+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43868 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43869 cn->used = 0;
43870
6e9df6a3 43871@@ -1816,6 +1889,218 @@ out:
43872 return ispipe;
43873 }
43874
43875+int pax_check_flags(unsigned long *flags)
43876+{
43877+ int retval = 0;
43878+
43879+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43880+ if (*flags & MF_PAX_SEGMEXEC)
43881+ {
43882+ *flags &= ~MF_PAX_SEGMEXEC;
43883+ retval = -EINVAL;
43884+ }
43885+#endif
43886+
43887+ if ((*flags & MF_PAX_PAGEEXEC)
43888+
43889+#ifdef CONFIG_PAX_PAGEEXEC
43890+ && (*flags & MF_PAX_SEGMEXEC)
43891+#endif
43892+
43893+ )
43894+ {
43895+ *flags &= ~MF_PAX_PAGEEXEC;
43896+ retval = -EINVAL;
43897+ }
43898+
43899+ if ((*flags & MF_PAX_MPROTECT)
43900+
43901+#ifdef CONFIG_PAX_MPROTECT
43902+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43903+#endif
43904+
43905+ )
43906+ {
43907+ *flags &= ~MF_PAX_MPROTECT;
43908+ retval = -EINVAL;
43909+ }
43910+
43911+ if ((*flags & MF_PAX_EMUTRAMP)
43912+
43913+#ifdef CONFIG_PAX_EMUTRAMP
43914+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43915+#endif
43916+
43917+ )
43918+ {
43919+ *flags &= ~MF_PAX_EMUTRAMP;
43920+ retval = -EINVAL;
43921+ }
43922+
43923+ return retval;
43924+}
43925+
43926+EXPORT_SYMBOL(pax_check_flags);
43927+
43928+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43929+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43930+{
43931+ struct task_struct *tsk = current;
43932+ struct mm_struct *mm = current->mm;
43933+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43934+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43935+ char *path_exec = NULL;
43936+ char *path_fault = NULL;
43937+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
43938+
43939+ if (buffer_exec && buffer_fault) {
43940+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43941+
43942+ down_read(&mm->mmap_sem);
43943+ vma = mm->mmap;
43944+ while (vma && (!vma_exec || !vma_fault)) {
43945+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43946+ vma_exec = vma;
43947+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43948+ vma_fault = vma;
43949+ vma = vma->vm_next;
43950+ }
43951+ if (vma_exec) {
43952+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43953+ if (IS_ERR(path_exec))
43954+ path_exec = "<path too long>";
43955+ else {
43956+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43957+ if (path_exec) {
43958+ *path_exec = 0;
43959+ path_exec = buffer_exec;
43960+ } else
43961+ path_exec = "<path too long>";
43962+ }
43963+ }
43964+ if (vma_fault) {
43965+ start = vma_fault->vm_start;
43966+ end = vma_fault->vm_end;
43967+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43968+ if (vma_fault->vm_file) {
43969+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43970+ if (IS_ERR(path_fault))
43971+ path_fault = "<path too long>";
43972+ else {
43973+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43974+ if (path_fault) {
43975+ *path_fault = 0;
43976+ path_fault = buffer_fault;
43977+ } else
43978+ path_fault = "<path too long>";
43979+ }
43980+ } else
43981+ path_fault = "<anonymous mapping>";
43982+ }
43983+ up_read(&mm->mmap_sem);
43984+ }
43985+ if (tsk->signal->curr_ip)
ae4e228f 43986+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43987+ else
43988+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43989+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43990+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43991+ task_uid(tsk), task_euid(tsk), pc, sp);
43992+ free_page((unsigned long)buffer_exec);
43993+ free_page((unsigned long)buffer_fault);
6e9df6a3 43994+ pax_report_insns(regs, pc, sp);
43995+ do_coredump(SIGKILL, SIGKILL, regs);
43996+}
43997+#endif
43998+
43999+#ifdef CONFIG_PAX_REFCOUNT
44000+void pax_report_refcount_overflow(struct pt_regs *regs)
44001+{
44002+ if (current->signal->curr_ip)
44003+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44004+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44005+ else
44006+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44007+ current->comm, task_pid_nr(current), current_uid(), current_euid());
44008+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44009+ show_regs(regs);
ae4e228f 44010+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44011+}
44012+#endif
44013+
44014+#ifdef CONFIG_PAX_USERCOPY
44015+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44016+int object_is_on_stack(const void *obj, unsigned long len)
44017+{
44018+ const void * const stack = task_stack_page(current);
44019+ const void * const stackend = stack + THREAD_SIZE;
44020+
57199397 44021+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44022+ const void *frame = NULL;
44023+ const void *oldframe;
44024+#endif
44025+
44026+ if (obj + len < obj)
44027+ return -1;
57199397 44028+
44029+ if (obj + len <= stack || stackend <= obj)
44030+ return 0;
57199397 44031+
6892158b 44032+ if (obj < stack || stackend < obj + len)
44033+ return -1;
44034+
57199397 44035+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44036+ oldframe = __builtin_frame_address(1);
44037+ if (oldframe)
44038+ frame = __builtin_frame_address(2);
44039+ /*
44040+ low ----------------------------------------------> high
44041+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
44042+ ^----------------^
44043+ allow copies only within here
44044+ */
44045+ while (stack <= frame && frame < stackend) {
44046+ /* if obj + len extends past the last frame, this
44047+ check won't pass and the next frame will be 0,
44048+ causing us to bail out and correctly report
44049+ the copy as invalid
57199397 44050+ */
44051+ if (obj + len <= frame)
44052+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44053+ oldframe = frame;
44054+ frame = *(const void * const *)frame;
57199397 44055+ }
57199397 44056+ return -1;
44057+#else
44058+ return 1;
44059+#endif
44060+}
44061+
15a11c5b 44062+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
58c5fc13 44063+{
ae4e228f 44064+ if (current->signal->curr_ip)
44065+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44066+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
ae4e228f 44067+ else
44068+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44069+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
58c5fc13 44070+ dump_stack();
71d190be 44071+ gr_handle_kernel_exploit();
44072+ do_group_exit(SIGKILL);
44073+}
44074+#endif
44075+
44076+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44077+void pax_track_stack(void)
44078+{
44079+ unsigned long sp = (unsigned long)&sp;
44080+ if (sp < current_thread_info()->lowest_stack &&
44081+ sp > (unsigned long)task_stack_page(current))
44082+ current_thread_info()->lowest_stack = sp;
44083+}
44084+EXPORT_SYMBOL(pax_track_stack);
44085+#endif
58c5fc13 44086+
df50ba0c 44087 static int zap_process(struct task_struct *start, int exit_code)
44088 {
44089 struct task_struct *t;
fe2de317 44090@@ -2027,17 +2312,17 @@ static void wait_for_dump_helpers(struct file *file)
44091 pipe = file->f_path.dentry->d_inode->i_pipe;
44092
44093 pipe_lock(pipe);
44094- pipe->readers++;
44095- pipe->writers--;
44096+ atomic_inc(&pipe->readers);
44097+ atomic_dec(&pipe->writers);
44098
44099- while ((pipe->readers > 1) && (!signal_pending(current))) {
44100+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44101 wake_up_interruptible_sync(&pipe->wait);
44102 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44103 pipe_wait(pipe);
44104 }
44105
44106- pipe->readers--;
44107- pipe->writers++;
44108+ atomic_dec(&pipe->readers);
44109+ atomic_inc(&pipe->writers);
44110 pipe_unlock(pipe);
44111
44112 }
fe2de317 44113@@ -2098,7 +2383,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44114 int retval = 0;
44115 int flag = 0;
44116 int ispipe;
44117- static atomic_t core_dump_count = ATOMIC_INIT(0);
44118+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44119 struct coredump_params cprm = {
44120 .signr = signr,
44121 .regs = regs,
fe2de317 44122@@ -2113,6 +2398,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44123
44124 audit_core_dumps(signr);
44125
44126+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44127+ gr_handle_brute_attach(current, cprm.mm_flags);
44128+
44129 binfmt = mm->binfmt;
44130 if (!binfmt || !binfmt->core_dump)
44131 goto fail;
fe2de317 44132@@ -2180,7 +2468,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44133 }
44134 cprm.limit = RLIM_INFINITY;
44135
44136- dump_count = atomic_inc_return(&core_dump_count);
44137+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
44138 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44139 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44140 task_tgid_vnr(current), current->comm);
fe2de317 44141@@ -2207,6 +2495,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44142 } else {
44143 struct inode *inode;
44144
44145+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44146+
44147 if (cprm.limit < binfmt->min_coredump)
44148 goto fail_unlock;
44149
44150@@ -2250,7 +2540,7 @@ close_fail:
44151 filp_close(cprm.file, NULL);
44152 fail_dropcount:
44153 if (ispipe)
44154- atomic_dec(&core_dump_count);
44155+ atomic_dec_unchecked(&core_dump_count);
44156 fail_unlock:
44157 kfree(cn.corename);
44158 fail_corename:
44159@@ -2269,7 +2559,7 @@ fail:
44160 */
44161 int dump_write(struct file *file, const void *addr, int nr)
44162 {
44163- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44164+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44165 }
44166 EXPORT_SYMBOL(dump_write);
44167
44168diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44169index 8f44cef..cb07120 100644
44170--- a/fs/ext2/balloc.c
44171+++ b/fs/ext2/balloc.c
44172@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44173
44174 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44175 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44176- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44177+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44178 sbi->s_resuid != current_fsuid() &&
44179 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44180 return 0;
44181diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44182index 6386d76..0a266b1 100644
44183--- a/fs/ext3/balloc.c
44184+++ b/fs/ext3/balloc.c
44185@@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
44186
44187 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44188 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44189- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44190+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44191 sbi->s_resuid != current_fsuid() &&
44192 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44193 return 0;
44194diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44195index f8224ad..fbef97c 100644
44196--- a/fs/ext4/balloc.c
44197+++ b/fs/ext4/balloc.c
44198@@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
44199 /* Hm, nope. Are (enough) root reserved blocks available? */
44200 if (sbi->s_resuid == current_fsuid() ||
44201 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44202- capable(CAP_SYS_RESOURCE) ||
44203- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44204+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44205+ capable_nolog(CAP_SYS_RESOURCE)) {
44206
44207 if (free_blocks >= (nblocks + dirty_blocks))
44208 return 1;
44209diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44210index 5c38120..2291d18 100644
44211--- a/fs/ext4/ext4.h
44212+++ b/fs/ext4/ext4.h
6e9df6a3 44213@@ -1180,19 +1180,19 @@ struct ext4_sb_info {
44214 unsigned long s_mb_last_start;
44215
44216 /* stats for buddy allocator */
44217- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44218- atomic_t s_bal_success; /* we found long enough chunks */
44219- atomic_t s_bal_allocated; /* in blocks */
44220- atomic_t s_bal_ex_scanned; /* total extents scanned */
44221- atomic_t s_bal_goals; /* goal hits */
44222- atomic_t s_bal_breaks; /* too long searches */
44223- atomic_t s_bal_2orders; /* 2^order hits */
44224+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44225+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44226+ atomic_unchecked_t s_bal_allocated; /* in blocks */
44227+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44228+ atomic_unchecked_t s_bal_goals; /* goal hits */
44229+ atomic_unchecked_t s_bal_breaks; /* too long searches */
44230+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44231 spinlock_t s_bal_lock;
44232 unsigned long s_mb_buddies_generated;
44233 unsigned long long s_mb_generation_time;
44234- atomic_t s_mb_lost_chunks;
44235- atomic_t s_mb_preallocated;
44236- atomic_t s_mb_discarded;
44237+ atomic_unchecked_t s_mb_lost_chunks;
44238+ atomic_unchecked_t s_mb_preallocated;
44239+ atomic_unchecked_t s_mb_discarded;
44240 atomic_t s_lock_busy;
44241
44242 /* locality groups */
44243diff --git a/fs/ext4/file.c b/fs/ext4/file.c
44244index e4095e9..1c006c5 100644
44245--- a/fs/ext4/file.c
44246+++ b/fs/ext4/file.c
44247@@ -181,8 +181,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
44248 path.dentry = mnt->mnt_root;
44249 cp = d_path(&path, buf, sizeof(buf));
44250 if (!IS_ERR(cp)) {
44251- memcpy(sbi->s_es->s_last_mounted, cp,
44252- sizeof(sbi->s_es->s_last_mounted));
44253+ strlcpy(sbi->s_es->s_last_mounted, cp,
44254+ sizeof(sbi->s_es->s_last_mounted));
44255 ext4_mark_super_dirty(sb);
44256 }
44257 }
44258diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
44259index f18bfe3..43759b1 100644
44260--- a/fs/ext4/ioctl.c
44261+++ b/fs/ext4/ioctl.c
44262@@ -348,7 +348,7 @@ mext_out:
44263 if (!blk_queue_discard(q))
44264 return -EOPNOTSUPP;
44265
44266- if (copy_from_user(&range, (struct fstrim_range *)arg,
44267+ if (copy_from_user(&range, (struct fstrim_range __user *)arg,
44268 sizeof(range)))
44269 return -EFAULT;
44270
44271@@ -358,7 +358,7 @@ mext_out:
44272 if (ret < 0)
44273 return ret;
44274
44275- if (copy_to_user((struct fstrim_range *)arg, &range,
44276+ if (copy_to_user((struct fstrim_range __user *)arg, &range,
44277 sizeof(range)))
44278 return -EFAULT;
44279
44280diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44281index 17a5a57..b6be3c5 100644
44282--- a/fs/ext4/mballoc.c
44283+++ b/fs/ext4/mballoc.c
44284@@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44285 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44286
44287 if (EXT4_SB(sb)->s_mb_stats)
44288- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44289+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44290
44291 break;
44292 }
6e9df6a3 44293@@ -2089,7 +2089,7 @@ repeat:
44294 ac->ac_status = AC_STATUS_CONTINUE;
44295 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44296 cr = 3;
44297- atomic_inc(&sbi->s_mb_lost_chunks);
44298+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44299 goto repeat;
44300 }
44301 }
fe2de317 44302@@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
44303 ext4_grpblk_t counters[16];
44304 } sg;
44305
44306+ pax_track_stack();
44307+
44308 group--;
44309 if (group == 0)
44310 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
fe2de317 44311@@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *sb)
bc901d79 44312 if (sbi->s_mb_stats) {
44313 ext4_msg(sb, KERN_INFO,
44314 "mballoc: %u blocks %u reqs (%u success)",
44315- atomic_read(&sbi->s_bal_allocated),
44316- atomic_read(&sbi->s_bal_reqs),
44317- atomic_read(&sbi->s_bal_success));
44318+ atomic_read_unchecked(&sbi->s_bal_allocated),
44319+ atomic_read_unchecked(&sbi->s_bal_reqs),
44320+ atomic_read_unchecked(&sbi->s_bal_success));
44321 ext4_msg(sb, KERN_INFO,
44322 "mballoc: %u extents scanned, %u goal hits, "
44323 "%u 2^N hits, %u breaks, %u lost",
44324- atomic_read(&sbi->s_bal_ex_scanned),
44325- atomic_read(&sbi->s_bal_goals),
44326- atomic_read(&sbi->s_bal_2orders),
44327- atomic_read(&sbi->s_bal_breaks),
44328- atomic_read(&sbi->s_mb_lost_chunks));
44329+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44330+ atomic_read_unchecked(&sbi->s_bal_goals),
44331+ atomic_read_unchecked(&sbi->s_bal_2orders),
44332+ atomic_read_unchecked(&sbi->s_bal_breaks),
44333+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44334 ext4_msg(sb, KERN_INFO,
44335 "mballoc: %lu generated and it took %Lu",
44336 sbi->s_mb_buddies_generated,
bc901d79 44337 sbi->s_mb_generation_time);
44338 ext4_msg(sb, KERN_INFO,
44339 "mballoc: %u preallocated, %u discarded",
44340- atomic_read(&sbi->s_mb_preallocated),
44341- atomic_read(&sbi->s_mb_discarded));
44342+ atomic_read_unchecked(&sbi->s_mb_preallocated),
44343+ atomic_read_unchecked(&sbi->s_mb_discarded));
44344 }
44345
44346 free_percpu(sbi->s_locality_groups);
fe2de317 44347@@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44348 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44349
44350 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44351- atomic_inc(&sbi->s_bal_reqs);
44352- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44353+ atomic_inc_unchecked(&sbi->s_bal_reqs);
44354+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44355 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44356- atomic_inc(&sbi->s_bal_success);
44357- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44358+ atomic_inc_unchecked(&sbi->s_bal_success);
44359+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44360 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44361 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44362- atomic_inc(&sbi->s_bal_goals);
44363+ atomic_inc_unchecked(&sbi->s_bal_goals);
44364 if (ac->ac_found > sbi->s_mb_max_to_scan)
44365- atomic_inc(&sbi->s_bal_breaks);
44366+ atomic_inc_unchecked(&sbi->s_bal_breaks);
44367 }
44368
44369 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
fe2de317 44370@@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44371 trace_ext4_mb_new_inode_pa(ac, pa);
44372
44373 ext4_mb_use_inode_pa(ac, pa);
44374- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44375+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44376
44377 ei = EXT4_I(ac->ac_inode);
44378 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
fe2de317 44379@@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44380 trace_ext4_mb_new_group_pa(ac, pa);
44381
44382 ext4_mb_use_group_pa(ac, pa);
44383- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44384+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44385
44386 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44387 lg = ac->ac_lg;
fe2de317 44388@@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44389 * from the bitmap and continue.
44390 */
44391 }
44392- atomic_add(free, &sbi->s_mb_discarded);
44393+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
44394
44395 return err;
44396 }
fe2de317 44397@@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44398 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44399 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44400 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44401- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44402+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44403 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44404
44405 return 0;
44406diff --git a/fs/fcntl.c b/fs/fcntl.c
44407index 22764c7..86372c9 100644
44408--- a/fs/fcntl.c
44409+++ b/fs/fcntl.c
44410@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44411 if (err)
44412 return err;
44413
44414+ if (gr_handle_chroot_fowner(pid, type))
44415+ return -ENOENT;
44416+ if (gr_check_protected_task_fowner(pid, type))
44417+ return -EACCES;
44418+
44419 f_modown(filp, pid, type, force);
44420 return 0;
44421 }
44422@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44423
44424 static int f_setown_ex(struct file *filp, unsigned long arg)
44425 {
44426- struct f_owner_ex * __user owner_p = (void * __user)arg;
44427+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44428 struct f_owner_ex owner;
44429 struct pid *pid;
44430 int type;
fe2de317 44431@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44432
44433 static int f_getown_ex(struct file *filp, unsigned long arg)
44434 {
44435- struct f_owner_ex * __user owner_p = (void * __user)arg;
44436+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44437 struct f_owner_ex owner;
44438 int ret = 0;
44439
fe2de317 44440@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44441 switch (cmd) {
44442 case F_DUPFD:
44443 case F_DUPFD_CLOEXEC:
44444+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
df50ba0c 44445 if (arg >= rlimit(RLIMIT_NOFILE))
44446 break;
44447 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44448diff --git a/fs/fifo.c b/fs/fifo.c
44449index b1a524d..4ee270e 100644
44450--- a/fs/fifo.c
44451+++ b/fs/fifo.c
44452@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44453 */
44454 filp->f_op = &read_pipefifo_fops;
44455 pipe->r_counter++;
44456- if (pipe->readers++ == 0)
44457+ if (atomic_inc_return(&pipe->readers) == 1)
44458 wake_up_partner(inode);
44459
44460- if (!pipe->writers) {
44461+ if (!atomic_read(&pipe->writers)) {
44462 if ((filp->f_flags & O_NONBLOCK)) {
44463 /* suppress POLLHUP until we have
44464 * seen a writer */
fe2de317 44465@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44466 * errno=ENXIO when there is no process reading the FIFO.
44467 */
44468 ret = -ENXIO;
44469- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44470+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44471 goto err;
44472
44473 filp->f_op = &write_pipefifo_fops;
44474 pipe->w_counter++;
44475- if (!pipe->writers++)
44476+ if (atomic_inc_return(&pipe->writers) == 1)
44477 wake_up_partner(inode);
44478
44479- if (!pipe->readers) {
44480+ if (!atomic_read(&pipe->readers)) {
44481 wait_for_partner(inode, &pipe->r_counter);
44482 if (signal_pending(current))
44483 goto err_wr;
fe2de317 44484@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44485 */
44486 filp->f_op = &rdwr_pipefifo_fops;
44487
44488- pipe->readers++;
44489- pipe->writers++;
44490+ atomic_inc(&pipe->readers);
44491+ atomic_inc(&pipe->writers);
44492 pipe->r_counter++;
44493 pipe->w_counter++;
44494- if (pipe->readers == 1 || pipe->writers == 1)
44495+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44496 wake_up_partner(inode);
44497 break;
44498
fe2de317 44499@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44500 return 0;
44501
44502 err_rd:
44503- if (!--pipe->readers)
44504+ if (atomic_dec_and_test(&pipe->readers))
44505 wake_up_interruptible(&pipe->wait);
44506 ret = -ERESTARTSYS;
44507 goto err;
44508
44509 err_wr:
44510- if (!--pipe->writers)
44511+ if (atomic_dec_and_test(&pipe->writers))
44512 wake_up_interruptible(&pipe->wait);
44513 ret = -ERESTARTSYS;
44514 goto err;
44515
44516 err:
44517- if (!pipe->readers && !pipe->writers)
44518+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44519 free_pipe_info(inode);
44520
44521 err_nocleanup:
44522diff --git a/fs/file.c b/fs/file.c
44523index 4c6992d..104cdea 100644
44524--- a/fs/file.c
44525+++ b/fs/file.c
66a7e928 44526@@ -15,6 +15,7 @@
44527 #include <linux/slab.h>
44528 #include <linux/vmalloc.h>
44529 #include <linux/file.h>
44530+#include <linux/security.h>
44531 #include <linux/fdtable.h>
44532 #include <linux/bitops.h>
44533 #include <linux/interrupt.h>
fe2de317 44534@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
44535 * N.B. For clone tasks sharing a files structure, this test
44536 * will limit the total number of files that can be opened.
44537 */
58c5fc13 44538+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
df50ba0c 44539 if (nr >= rlimit(RLIMIT_NOFILE))
44540 return -EMFILE;
44541
44542diff --git a/fs/filesystems.c b/fs/filesystems.c
44543index 0845f84..7b4ebef 100644
44544--- a/fs/filesystems.c
44545+++ b/fs/filesystems.c
44546@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
44547 int len = dot ? dot - name : strlen(name);
44548
44549 fs = __get_fs_type(name, len);
44550+
44551+#ifdef CONFIG_GRKERNSEC_MODHARDEN
44552+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44553+#else
44554 if (!fs && (request_module("%.*s", len, name) == 0))
44555+#endif
44556 fs = __get_fs_type(name, len);
44557
44558 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44559diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44560index 78b519c..212c0d0 100644
44561--- a/fs/fs_struct.c
44562+++ b/fs/fs_struct.c
44563@@ -4,6 +4,7 @@
44564 #include <linux/path.h>
44565 #include <linux/slab.h>
44566 #include <linux/fs_struct.h>
44567+#include <linux/grsecurity.h>
44568 #include "internal.h"
44569
44570 static inline void path_get_longterm(struct path *path)
44571@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44572 old_root = fs->root;
44573 fs->root = *path;
44574 path_get_longterm(path);
44575+ gr_set_chroot_entries(current, path);
44576 write_seqcount_end(&fs->seq);
44577 spin_unlock(&fs->lock);
44578 if (old_root.dentry)
44579@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44580 && fs->root.mnt == old_root->mnt) {
44581 path_get_longterm(new_root);
44582 fs->root = *new_root;
44583+ gr_set_chroot_entries(p, new_root);
44584 count++;
44585 }
44586 if (fs->pwd.dentry == old_root->dentry
44587@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
44588 spin_lock(&fs->lock);
44589 write_seqcount_begin(&fs->seq);
44590 tsk->fs = NULL;
44591- kill = !--fs->users;
44592+ gr_clear_chroot_entries(tsk);
44593+ kill = !atomic_dec_return(&fs->users);
44594 write_seqcount_end(&fs->seq);
44595 spin_unlock(&fs->lock);
44596 task_unlock(tsk);
44597@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44598 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44599 /* We don't need to lock fs - think why ;-) */
44600 if (fs) {
44601- fs->users = 1;
44602+ atomic_set(&fs->users, 1);
44603 fs->in_exec = 0;
44604 spin_lock_init(&fs->lock);
44605 seqcount_init(&fs->seq);
44606@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44607 spin_lock(&old->lock);
44608 fs->root = old->root;
44609 path_get_longterm(&fs->root);
44610+ /* instead of calling gr_set_chroot_entries here,
44611+ we call it from every caller of this function
44612+ */
44613 fs->pwd = old->pwd;
44614 path_get_longterm(&fs->pwd);
44615 spin_unlock(&old->lock);
44616@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44617
44618 task_lock(current);
44619 spin_lock(&fs->lock);
44620- kill = !--fs->users;
44621+ kill = !atomic_dec_return(&fs->users);
44622 current->fs = new_fs;
44623+ gr_set_chroot_entries(current, &new_fs->root);
44624 spin_unlock(&fs->lock);
44625 task_unlock(current);
44626
44627@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
44628
44629 /* to be mentioned only in INIT_TASK */
44630 struct fs_struct init_fs = {
44631- .users = 1,
44632+ .users = ATOMIC_INIT(1),
44633 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44634 .seq = SEQCNT_ZERO,
44635 .umask = 0022,
44636@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44637 task_lock(current);
44638
44639 spin_lock(&init_fs.lock);
44640- init_fs.users++;
44641+ atomic_inc(&init_fs.users);
44642 spin_unlock(&init_fs.lock);
44643
44644 spin_lock(&fs->lock);
44645 current->fs = &init_fs;
44646- kill = !--fs->users;
44647+ gr_set_chroot_entries(current, &current->fs->root);
44648+ kill = !atomic_dec_return(&fs->users);
44649 spin_unlock(&fs->lock);
44650
44651 task_unlock(current);
44652diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44653index 9905350..02eaec4 100644
44654--- a/fs/fscache/cookie.c
44655+++ b/fs/fscache/cookie.c
44656@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44657 parent ? (char *) parent->def->name : "<no-parent>",
44658 def->name, netfs_data);
44659
44660- fscache_stat(&fscache_n_acquires);
44661+ fscache_stat_unchecked(&fscache_n_acquires);
44662
44663 /* if there's no parent cookie, then we don't create one here either */
44664 if (!parent) {
44665- fscache_stat(&fscache_n_acquires_null);
44666+ fscache_stat_unchecked(&fscache_n_acquires_null);
44667 _leave(" [no parent]");
44668 return NULL;
44669 }
fe2de317 44670@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44671 /* allocate and initialise a cookie */
44672 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44673 if (!cookie) {
44674- fscache_stat(&fscache_n_acquires_oom);
44675+ fscache_stat_unchecked(&fscache_n_acquires_oom);
44676 _leave(" [ENOMEM]");
44677 return NULL;
44678 }
fe2de317 44679@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44680
44681 switch (cookie->def->type) {
44682 case FSCACHE_COOKIE_TYPE_INDEX:
44683- fscache_stat(&fscache_n_cookie_index);
44684+ fscache_stat_unchecked(&fscache_n_cookie_index);
44685 break;
44686 case FSCACHE_COOKIE_TYPE_DATAFILE:
44687- fscache_stat(&fscache_n_cookie_data);
44688+ fscache_stat_unchecked(&fscache_n_cookie_data);
44689 break;
44690 default:
44691- fscache_stat(&fscache_n_cookie_special);
44692+ fscache_stat_unchecked(&fscache_n_cookie_special);
44693 break;
44694 }
44695
fe2de317 44696@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44697 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44698 atomic_dec(&parent->n_children);
44699 __fscache_cookie_put(cookie);
44700- fscache_stat(&fscache_n_acquires_nobufs);
44701+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44702 _leave(" = NULL");
44703 return NULL;
44704 }
44705 }
44706
44707- fscache_stat(&fscache_n_acquires_ok);
44708+ fscache_stat_unchecked(&fscache_n_acquires_ok);
44709 _leave(" = %p", cookie);
44710 return cookie;
44711 }
fe2de317 44712@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44713 cache = fscache_select_cache_for_object(cookie->parent);
44714 if (!cache) {
44715 up_read(&fscache_addremove_sem);
44716- fscache_stat(&fscache_n_acquires_no_cache);
44717+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44718 _leave(" = -ENOMEDIUM [no cache]");
44719 return -ENOMEDIUM;
44720 }
fe2de317 44721@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44722 object = cache->ops->alloc_object(cache, cookie);
44723 fscache_stat_d(&fscache_n_cop_alloc_object);
44724 if (IS_ERR(object)) {
44725- fscache_stat(&fscache_n_object_no_alloc);
44726+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
44727 ret = PTR_ERR(object);
44728 goto error;
44729 }
44730
44731- fscache_stat(&fscache_n_object_alloc);
44732+ fscache_stat_unchecked(&fscache_n_object_alloc);
44733
44734 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44735
fe2de317 44736@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44737 struct fscache_object *object;
44738 struct hlist_node *_p;
44739
44740- fscache_stat(&fscache_n_updates);
44741+ fscache_stat_unchecked(&fscache_n_updates);
44742
44743 if (!cookie) {
44744- fscache_stat(&fscache_n_updates_null);
44745+ fscache_stat_unchecked(&fscache_n_updates_null);
44746 _leave(" [no cookie]");
44747 return;
44748 }
fe2de317 44749@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44750 struct fscache_object *object;
44751 unsigned long event;
44752
44753- fscache_stat(&fscache_n_relinquishes);
44754+ fscache_stat_unchecked(&fscache_n_relinquishes);
44755 if (retire)
44756- fscache_stat(&fscache_n_relinquishes_retire);
44757+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44758
44759 if (!cookie) {
44760- fscache_stat(&fscache_n_relinquishes_null);
44761+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
44762 _leave(" [no cookie]");
44763 return;
44764 }
fe2de317 44765@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44766
44767 /* wait for the cookie to finish being instantiated (or to fail) */
44768 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44769- fscache_stat(&fscache_n_relinquishes_waitcrt);
44770+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44771 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44772 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44773 }
44774diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44775index f6aad48..88dcf26 100644
44776--- a/fs/fscache/internal.h
44777+++ b/fs/fscache/internal.h
44778@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44779 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44780 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44781
44782-extern atomic_t fscache_n_op_pend;
44783-extern atomic_t fscache_n_op_run;
44784-extern atomic_t fscache_n_op_enqueue;
44785-extern atomic_t fscache_n_op_deferred_release;
44786-extern atomic_t fscache_n_op_release;
44787-extern atomic_t fscache_n_op_gc;
44788-extern atomic_t fscache_n_op_cancelled;
44789-extern atomic_t fscache_n_op_rejected;
44790+extern atomic_unchecked_t fscache_n_op_pend;
44791+extern atomic_unchecked_t fscache_n_op_run;
44792+extern atomic_unchecked_t fscache_n_op_enqueue;
44793+extern atomic_unchecked_t fscache_n_op_deferred_release;
44794+extern atomic_unchecked_t fscache_n_op_release;
44795+extern atomic_unchecked_t fscache_n_op_gc;
44796+extern atomic_unchecked_t fscache_n_op_cancelled;
44797+extern atomic_unchecked_t fscache_n_op_rejected;
44798
44799-extern atomic_t fscache_n_attr_changed;
44800-extern atomic_t fscache_n_attr_changed_ok;
44801-extern atomic_t fscache_n_attr_changed_nobufs;
44802-extern atomic_t fscache_n_attr_changed_nomem;
44803-extern atomic_t fscache_n_attr_changed_calls;
44804+extern atomic_unchecked_t fscache_n_attr_changed;
44805+extern atomic_unchecked_t fscache_n_attr_changed_ok;
44806+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44807+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44808+extern atomic_unchecked_t fscache_n_attr_changed_calls;
44809
44810-extern atomic_t fscache_n_allocs;
44811-extern atomic_t fscache_n_allocs_ok;
44812-extern atomic_t fscache_n_allocs_wait;
44813-extern atomic_t fscache_n_allocs_nobufs;
44814-extern atomic_t fscache_n_allocs_intr;
44815-extern atomic_t fscache_n_allocs_object_dead;
44816-extern atomic_t fscache_n_alloc_ops;
44817-extern atomic_t fscache_n_alloc_op_waits;
44818+extern atomic_unchecked_t fscache_n_allocs;
44819+extern atomic_unchecked_t fscache_n_allocs_ok;
44820+extern atomic_unchecked_t fscache_n_allocs_wait;
44821+extern atomic_unchecked_t fscache_n_allocs_nobufs;
44822+extern atomic_unchecked_t fscache_n_allocs_intr;
44823+extern atomic_unchecked_t fscache_n_allocs_object_dead;
44824+extern atomic_unchecked_t fscache_n_alloc_ops;
44825+extern atomic_unchecked_t fscache_n_alloc_op_waits;
44826
44827-extern atomic_t fscache_n_retrievals;
44828-extern atomic_t fscache_n_retrievals_ok;
44829-extern atomic_t fscache_n_retrievals_wait;
44830-extern atomic_t fscache_n_retrievals_nodata;
44831-extern atomic_t fscache_n_retrievals_nobufs;
44832-extern atomic_t fscache_n_retrievals_intr;
44833-extern atomic_t fscache_n_retrievals_nomem;
44834-extern atomic_t fscache_n_retrievals_object_dead;
44835-extern atomic_t fscache_n_retrieval_ops;
44836-extern atomic_t fscache_n_retrieval_op_waits;
44837+extern atomic_unchecked_t fscache_n_retrievals;
44838+extern atomic_unchecked_t fscache_n_retrievals_ok;
44839+extern atomic_unchecked_t fscache_n_retrievals_wait;
44840+extern atomic_unchecked_t fscache_n_retrievals_nodata;
44841+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44842+extern atomic_unchecked_t fscache_n_retrievals_intr;
44843+extern atomic_unchecked_t fscache_n_retrievals_nomem;
44844+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44845+extern atomic_unchecked_t fscache_n_retrieval_ops;
44846+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44847
44848-extern atomic_t fscache_n_stores;
44849-extern atomic_t fscache_n_stores_ok;
44850-extern atomic_t fscache_n_stores_again;
44851-extern atomic_t fscache_n_stores_nobufs;
44852-extern atomic_t fscache_n_stores_oom;
44853-extern atomic_t fscache_n_store_ops;
44854-extern atomic_t fscache_n_store_calls;
44855-extern atomic_t fscache_n_store_pages;
44856-extern atomic_t fscache_n_store_radix_deletes;
44857-extern atomic_t fscache_n_store_pages_over_limit;
44858+extern atomic_unchecked_t fscache_n_stores;
44859+extern atomic_unchecked_t fscache_n_stores_ok;
44860+extern atomic_unchecked_t fscache_n_stores_again;
44861+extern atomic_unchecked_t fscache_n_stores_nobufs;
44862+extern atomic_unchecked_t fscache_n_stores_oom;
44863+extern atomic_unchecked_t fscache_n_store_ops;
44864+extern atomic_unchecked_t fscache_n_store_calls;
44865+extern atomic_unchecked_t fscache_n_store_pages;
44866+extern atomic_unchecked_t fscache_n_store_radix_deletes;
44867+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44868
44869-extern atomic_t fscache_n_store_vmscan_not_storing;
44870-extern atomic_t fscache_n_store_vmscan_gone;
44871-extern atomic_t fscache_n_store_vmscan_busy;
44872-extern atomic_t fscache_n_store_vmscan_cancelled;
44873+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44874+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44875+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44876+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44877
44878-extern atomic_t fscache_n_marks;
44879-extern atomic_t fscache_n_uncaches;
44880+extern atomic_unchecked_t fscache_n_marks;
44881+extern atomic_unchecked_t fscache_n_uncaches;
44882
44883-extern atomic_t fscache_n_acquires;
44884-extern atomic_t fscache_n_acquires_null;
44885-extern atomic_t fscache_n_acquires_no_cache;
44886-extern atomic_t fscache_n_acquires_ok;
44887-extern atomic_t fscache_n_acquires_nobufs;
44888-extern atomic_t fscache_n_acquires_oom;
44889+extern atomic_unchecked_t fscache_n_acquires;
44890+extern atomic_unchecked_t fscache_n_acquires_null;
44891+extern atomic_unchecked_t fscache_n_acquires_no_cache;
44892+extern atomic_unchecked_t fscache_n_acquires_ok;
44893+extern atomic_unchecked_t fscache_n_acquires_nobufs;
44894+extern atomic_unchecked_t fscache_n_acquires_oom;
44895
44896-extern atomic_t fscache_n_updates;
44897-extern atomic_t fscache_n_updates_null;
44898-extern atomic_t fscache_n_updates_run;
44899+extern atomic_unchecked_t fscache_n_updates;
44900+extern atomic_unchecked_t fscache_n_updates_null;
44901+extern atomic_unchecked_t fscache_n_updates_run;
44902
44903-extern atomic_t fscache_n_relinquishes;
44904-extern atomic_t fscache_n_relinquishes_null;
44905-extern atomic_t fscache_n_relinquishes_waitcrt;
44906-extern atomic_t fscache_n_relinquishes_retire;
44907+extern atomic_unchecked_t fscache_n_relinquishes;
44908+extern atomic_unchecked_t fscache_n_relinquishes_null;
44909+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44910+extern atomic_unchecked_t fscache_n_relinquishes_retire;
44911
44912-extern atomic_t fscache_n_cookie_index;
44913-extern atomic_t fscache_n_cookie_data;
44914-extern atomic_t fscache_n_cookie_special;
44915+extern atomic_unchecked_t fscache_n_cookie_index;
44916+extern atomic_unchecked_t fscache_n_cookie_data;
44917+extern atomic_unchecked_t fscache_n_cookie_special;
44918
44919-extern atomic_t fscache_n_object_alloc;
44920-extern atomic_t fscache_n_object_no_alloc;
44921-extern atomic_t fscache_n_object_lookups;
44922-extern atomic_t fscache_n_object_lookups_negative;
44923-extern atomic_t fscache_n_object_lookups_positive;
44924-extern atomic_t fscache_n_object_lookups_timed_out;
44925-extern atomic_t fscache_n_object_created;
44926-extern atomic_t fscache_n_object_avail;
44927-extern atomic_t fscache_n_object_dead;
44928+extern atomic_unchecked_t fscache_n_object_alloc;
44929+extern atomic_unchecked_t fscache_n_object_no_alloc;
44930+extern atomic_unchecked_t fscache_n_object_lookups;
44931+extern atomic_unchecked_t fscache_n_object_lookups_negative;
44932+extern atomic_unchecked_t fscache_n_object_lookups_positive;
44933+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44934+extern atomic_unchecked_t fscache_n_object_created;
44935+extern atomic_unchecked_t fscache_n_object_avail;
44936+extern atomic_unchecked_t fscache_n_object_dead;
44937
44938-extern atomic_t fscache_n_checkaux_none;
44939-extern atomic_t fscache_n_checkaux_okay;
44940-extern atomic_t fscache_n_checkaux_update;
44941-extern atomic_t fscache_n_checkaux_obsolete;
44942+extern atomic_unchecked_t fscache_n_checkaux_none;
44943+extern atomic_unchecked_t fscache_n_checkaux_okay;
44944+extern atomic_unchecked_t fscache_n_checkaux_update;
44945+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44946
44947 extern atomic_t fscache_n_cop_alloc_object;
44948 extern atomic_t fscache_n_cop_lookup_object;
44949@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44950 atomic_inc(stat);
44951 }
44952
44953+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44954+{
44955+ atomic_inc_unchecked(stat);
44956+}
44957+
44958 static inline void fscache_stat_d(atomic_t *stat)
44959 {
44960 atomic_dec(stat);
44961@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44962
44963 #define __fscache_stat(stat) (NULL)
44964 #define fscache_stat(stat) do {} while (0)
44965+#define fscache_stat_unchecked(stat) do {} while (0)
44966 #define fscache_stat_d(stat) do {} while (0)
44967 #endif
44968
44969diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44970index b6b897c..0ffff9c 100644
44971--- a/fs/fscache/object.c
44972+++ b/fs/fscache/object.c
44973@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44974 /* update the object metadata on disk */
44975 case FSCACHE_OBJECT_UPDATING:
44976 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44977- fscache_stat(&fscache_n_updates_run);
44978+ fscache_stat_unchecked(&fscache_n_updates_run);
44979 fscache_stat(&fscache_n_cop_update_object);
44980 object->cache->ops->update_object(object);
44981 fscache_stat_d(&fscache_n_cop_update_object);
44982@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44983 spin_lock(&object->lock);
44984 object->state = FSCACHE_OBJECT_DEAD;
44985 spin_unlock(&object->lock);
44986- fscache_stat(&fscache_n_object_dead);
44987+ fscache_stat_unchecked(&fscache_n_object_dead);
44988 goto terminal_transit;
44989
44990 /* handle the parent cache of this object being withdrawn from
44991@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44992 spin_lock(&object->lock);
44993 object->state = FSCACHE_OBJECT_DEAD;
44994 spin_unlock(&object->lock);
44995- fscache_stat(&fscache_n_object_dead);
44996+ fscache_stat_unchecked(&fscache_n_object_dead);
44997 goto terminal_transit;
44998
44999 /* complain about the object being woken up once it is
45000@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45001 parent->cookie->def->name, cookie->def->name,
45002 object->cache->tag->name);
45003
45004- fscache_stat(&fscache_n_object_lookups);
45005+ fscache_stat_unchecked(&fscache_n_object_lookups);
45006 fscache_stat(&fscache_n_cop_lookup_object);
45007 ret = object->cache->ops->lookup_object(object);
45008 fscache_stat_d(&fscache_n_cop_lookup_object);
45009@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45010 if (ret == -ETIMEDOUT) {
45011 /* probably stuck behind another object, so move this one to
45012 * the back of the queue */
45013- fscache_stat(&fscache_n_object_lookups_timed_out);
45014+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45015 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45016 }
45017
45018@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45019
45020 spin_lock(&object->lock);
45021 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45022- fscache_stat(&fscache_n_object_lookups_negative);
45023+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45024
45025 /* transit here to allow write requests to begin stacking up
45026 * and read requests to begin returning ENODATA */
45027@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45028 * result, in which case there may be data available */
45029 spin_lock(&object->lock);
45030 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45031- fscache_stat(&fscache_n_object_lookups_positive);
45032+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45033
45034 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45035
45036@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45037 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45038 } else {
45039 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45040- fscache_stat(&fscache_n_object_created);
45041+ fscache_stat_unchecked(&fscache_n_object_created);
45042
45043 object->state = FSCACHE_OBJECT_AVAILABLE;
45044 spin_unlock(&object->lock);
45045@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45046 fscache_enqueue_dependents(object);
45047
45048 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45049- fscache_stat(&fscache_n_object_avail);
45050+ fscache_stat_unchecked(&fscache_n_object_avail);
45051
45052 _leave("");
45053 }
45054@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45055 enum fscache_checkaux result;
45056
45057 if (!object->cookie->def->check_aux) {
45058- fscache_stat(&fscache_n_checkaux_none);
45059+ fscache_stat_unchecked(&fscache_n_checkaux_none);
45060 return FSCACHE_CHECKAUX_OKAY;
45061 }
45062
45063@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45064 switch (result) {
45065 /* entry okay as is */
45066 case FSCACHE_CHECKAUX_OKAY:
45067- fscache_stat(&fscache_n_checkaux_okay);
45068+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
45069 break;
45070
45071 /* entry requires update */
45072 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45073- fscache_stat(&fscache_n_checkaux_update);
45074+ fscache_stat_unchecked(&fscache_n_checkaux_update);
45075 break;
45076
45077 /* entry requires deletion */
45078 case FSCACHE_CHECKAUX_OBSOLETE:
45079- fscache_stat(&fscache_n_checkaux_obsolete);
45080+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45081 break;
45082
45083 default:
45084diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45085index 30afdfa..2256596 100644
45086--- a/fs/fscache/operation.c
45087+++ b/fs/fscache/operation.c
45088@@ -17,7 +17,7 @@
45089 #include <linux/slab.h>
45090 #include "internal.h"
45091
45092-atomic_t fscache_op_debug_id;
45093+atomic_unchecked_t fscache_op_debug_id;
45094 EXPORT_SYMBOL(fscache_op_debug_id);
45095
45096 /**
45097@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45098 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45099 ASSERTCMP(atomic_read(&op->usage), >, 0);
45100
45101- fscache_stat(&fscache_n_op_enqueue);
45102+ fscache_stat_unchecked(&fscache_n_op_enqueue);
45103 switch (op->flags & FSCACHE_OP_TYPE) {
45104 case FSCACHE_OP_ASYNC:
45105 _debug("queue async");
45106@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45107 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45108 if (op->processor)
45109 fscache_enqueue_operation(op);
45110- fscache_stat(&fscache_n_op_run);
45111+ fscache_stat_unchecked(&fscache_n_op_run);
45112 }
45113
45114 /*
45115@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45116 if (object->n_ops > 1) {
45117 atomic_inc(&op->usage);
45118 list_add_tail(&op->pend_link, &object->pending_ops);
45119- fscache_stat(&fscache_n_op_pend);
45120+ fscache_stat_unchecked(&fscache_n_op_pend);
45121 } else if (!list_empty(&object->pending_ops)) {
45122 atomic_inc(&op->usage);
45123 list_add_tail(&op->pend_link, &object->pending_ops);
45124- fscache_stat(&fscache_n_op_pend);
45125+ fscache_stat_unchecked(&fscache_n_op_pend);
45126 fscache_start_operations(object);
45127 } else {
45128 ASSERTCMP(object->n_in_progress, ==, 0);
45129@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45130 object->n_exclusive++; /* reads and writes must wait */
45131 atomic_inc(&op->usage);
45132 list_add_tail(&op->pend_link, &object->pending_ops);
45133- fscache_stat(&fscache_n_op_pend);
45134+ fscache_stat_unchecked(&fscache_n_op_pend);
45135 ret = 0;
45136 } else {
45137 /* not allowed to submit ops in any other state */
45138@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45139 if (object->n_exclusive > 0) {
45140 atomic_inc(&op->usage);
45141 list_add_tail(&op->pend_link, &object->pending_ops);
45142- fscache_stat(&fscache_n_op_pend);
45143+ fscache_stat_unchecked(&fscache_n_op_pend);
45144 } else if (!list_empty(&object->pending_ops)) {
45145 atomic_inc(&op->usage);
45146 list_add_tail(&op->pend_link, &object->pending_ops);
45147- fscache_stat(&fscache_n_op_pend);
45148+ fscache_stat_unchecked(&fscache_n_op_pend);
45149 fscache_start_operations(object);
45150 } else {
45151 ASSERTCMP(object->n_exclusive, ==, 0);
45152@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45153 object->n_ops++;
45154 atomic_inc(&op->usage);
45155 list_add_tail(&op->pend_link, &object->pending_ops);
45156- fscache_stat(&fscache_n_op_pend);
45157+ fscache_stat_unchecked(&fscache_n_op_pend);
45158 ret = 0;
45159 } else if (object->state == FSCACHE_OBJECT_DYING ||
45160 object->state == FSCACHE_OBJECT_LC_DYING ||
45161 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45162- fscache_stat(&fscache_n_op_rejected);
45163+ fscache_stat_unchecked(&fscache_n_op_rejected);
45164 ret = -ENOBUFS;
45165 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45166 fscache_report_unexpected_submission(object, op, ostate);
45167@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45168
45169 ret = -EBUSY;
45170 if (!list_empty(&op->pend_link)) {
45171- fscache_stat(&fscache_n_op_cancelled);
45172+ fscache_stat_unchecked(&fscache_n_op_cancelled);
45173 list_del_init(&op->pend_link);
45174 object->n_ops--;
45175 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45176@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45177 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45178 BUG();
45179
45180- fscache_stat(&fscache_n_op_release);
45181+ fscache_stat_unchecked(&fscache_n_op_release);
45182
45183 if (op->release) {
45184 op->release(op);
45185@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45186 * lock, and defer it otherwise */
45187 if (!spin_trylock(&object->lock)) {
45188 _debug("defer put");
45189- fscache_stat(&fscache_n_op_deferred_release);
45190+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
45191
45192 cache = object->cache;
45193 spin_lock(&cache->op_gc_list_lock);
45194@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45195
45196 _debug("GC DEFERRED REL OBJ%x OP%x",
45197 object->debug_id, op->debug_id);
45198- fscache_stat(&fscache_n_op_gc);
45199+ fscache_stat_unchecked(&fscache_n_op_gc);
45200
45201 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45202
45203diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45204index 3f7a59b..cf196cc 100644
45205--- a/fs/fscache/page.c
45206+++ b/fs/fscache/page.c
45207@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45208 val = radix_tree_lookup(&cookie->stores, page->index);
45209 if (!val) {
45210 rcu_read_unlock();
45211- fscache_stat(&fscache_n_store_vmscan_not_storing);
45212+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45213 __fscache_uncache_page(cookie, page);
45214 return true;
45215 }
45216@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45217 spin_unlock(&cookie->stores_lock);
45218
45219 if (xpage) {
45220- fscache_stat(&fscache_n_store_vmscan_cancelled);
45221- fscache_stat(&fscache_n_store_radix_deletes);
45222+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45223+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45224 ASSERTCMP(xpage, ==, page);
45225 } else {
45226- fscache_stat(&fscache_n_store_vmscan_gone);
45227+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45228 }
45229
45230 wake_up_bit(&cookie->flags, 0);
45231@@ -107,7 +107,7 @@ page_busy:
45232 /* we might want to wait here, but that could deadlock the allocator as
45233 * the work threads writing to the cache may all end up sleeping
45234 * on memory allocation */
45235- fscache_stat(&fscache_n_store_vmscan_busy);
45236+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45237 return false;
45238 }
45239 EXPORT_SYMBOL(__fscache_maybe_release_page);
45240@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45241 FSCACHE_COOKIE_STORING_TAG);
45242 if (!radix_tree_tag_get(&cookie->stores, page->index,
45243 FSCACHE_COOKIE_PENDING_TAG)) {
45244- fscache_stat(&fscache_n_store_radix_deletes);
45245+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45246 xpage = radix_tree_delete(&cookie->stores, page->index);
45247 }
45248 spin_unlock(&cookie->stores_lock);
45249@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45250
45251 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45252
45253- fscache_stat(&fscache_n_attr_changed_calls);
45254+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45255
45256 if (fscache_object_is_active(object)) {
45257 	fscache_stat(&fscache_n_cop_attr_changed);
45258@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45259
45260 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45261
45262- fscache_stat(&fscache_n_attr_changed);
45263+ fscache_stat_unchecked(&fscache_n_attr_changed);
45264
45265 op = kzalloc(sizeof(*op), GFP_KERNEL);
45266 if (!op) {
45267- fscache_stat(&fscache_n_attr_changed_nomem);
45268+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45269 _leave(" = -ENOMEM");
45270 return -ENOMEM;
45271 }
45272@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45273 if (fscache_submit_exclusive_op(object, op) < 0)
45274 goto nobufs;
45275 spin_unlock(&cookie->lock);
45276- fscache_stat(&fscache_n_attr_changed_ok);
45277+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45278 fscache_put_operation(op);
45279 _leave(" = 0");
45280 return 0;
45281@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45282 nobufs:
45283 spin_unlock(&cookie->lock);
45284 kfree(op);
45285- fscache_stat(&fscache_n_attr_changed_nobufs);
45286+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45287 _leave(" = %d", -ENOBUFS);
45288 return -ENOBUFS;
45289 }
45290@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45291 /* allocate a retrieval operation and attempt to submit it */
45292 op = kzalloc(sizeof(*op), GFP_NOIO);
45293 if (!op) {
45294- fscache_stat(&fscache_n_retrievals_nomem);
45295+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45296 return NULL;
45297 }
45298
45299@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45300 return 0;
45301 }
45302
45303- fscache_stat(&fscache_n_retrievals_wait);
45304+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
45305
45306 jif = jiffies;
45307 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45308 fscache_wait_bit_interruptible,
45309 TASK_INTERRUPTIBLE) != 0) {
45310- fscache_stat(&fscache_n_retrievals_intr);
45311+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45312 _leave(" = -ERESTARTSYS");
45313 return -ERESTARTSYS;
45314 }
45315@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45316 */
45317 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45318 struct fscache_retrieval *op,
45319- atomic_t *stat_op_waits,
45320- atomic_t *stat_object_dead)
45321+ atomic_unchecked_t *stat_op_waits,
45322+ atomic_unchecked_t *stat_object_dead)
45323 {
45324 int ret;
45325
45326@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45327 goto check_if_dead;
45328
45329 _debug(">>> WT");
45330- fscache_stat(stat_op_waits);
45331+ fscache_stat_unchecked(stat_op_waits);
45332 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45333 fscache_wait_bit_interruptible,
45334 TASK_INTERRUPTIBLE) < 0) {
45335@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45336
45337 check_if_dead:
45338 if (unlikely(fscache_object_is_dead(object))) {
45339- fscache_stat(stat_object_dead);
45340+ fscache_stat_unchecked(stat_object_dead);
45341 return -ENOBUFS;
45342 }
45343 return 0;
45344@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45345
45346 _enter("%p,%p,,,", cookie, page);
45347
45348- fscache_stat(&fscache_n_retrievals);
45349+ fscache_stat_unchecked(&fscache_n_retrievals);
45350
45351 if (hlist_empty(&cookie->backing_objects))
45352 goto nobufs;
45353@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45354 goto nobufs_unlock;
45355 spin_unlock(&cookie->lock);
45356
45357- fscache_stat(&fscache_n_retrieval_ops);
45358+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45359
45360 /* pin the netfs read context in case we need to do the actual netfs
45361 * read because we've encountered a cache read failure */
45362@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45363
45364 error:
45365 if (ret == -ENOMEM)
45366- fscache_stat(&fscache_n_retrievals_nomem);
45367+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45368 else if (ret == -ERESTARTSYS)
45369- fscache_stat(&fscache_n_retrievals_intr);
45370+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45371 else if (ret == -ENODATA)
45372- fscache_stat(&fscache_n_retrievals_nodata);
45373+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45374 else if (ret < 0)
45375- fscache_stat(&fscache_n_retrievals_nobufs);
45376+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45377 else
45378- fscache_stat(&fscache_n_retrievals_ok);
45379+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45380
45381 fscache_put_retrieval(op);
45382 _leave(" = %d", ret);
45383@@ -429,7 +429,7 @@ nobufs_unlock:
45384 spin_unlock(&cookie->lock);
45385 kfree(op);
45386 nobufs:
45387- fscache_stat(&fscache_n_retrievals_nobufs);
45388+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45389 _leave(" = -ENOBUFS");
45390 return -ENOBUFS;
45391 }
45392@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45393
45394 _enter("%p,,%d,,,", cookie, *nr_pages);
45395
45396- fscache_stat(&fscache_n_retrievals);
45397+ fscache_stat_unchecked(&fscache_n_retrievals);
45398
45399 if (hlist_empty(&cookie->backing_objects))
45400 goto nobufs;
45401@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45402 goto nobufs_unlock;
45403 spin_unlock(&cookie->lock);
45404
45405- fscache_stat(&fscache_n_retrieval_ops);
45406+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45407
45408 /* pin the netfs read context in case we need to do the actual netfs
45409 * read because we've encountered a cache read failure */
45410@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45411
45412 error:
45413 if (ret == -ENOMEM)
45414- fscache_stat(&fscache_n_retrievals_nomem);
45415+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45416 else if (ret == -ERESTARTSYS)
45417- fscache_stat(&fscache_n_retrievals_intr);
45418+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45419 else if (ret == -ENODATA)
45420- fscache_stat(&fscache_n_retrievals_nodata);
45421+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45422 else if (ret < 0)
45423- fscache_stat(&fscache_n_retrievals_nobufs);
45424+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45425 else
45426- fscache_stat(&fscache_n_retrievals_ok);
45427+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45428
45429 fscache_put_retrieval(op);
45430 _leave(" = %d", ret);
45431@@ -545,7 +545,7 @@ nobufs_unlock:
45432 spin_unlock(&cookie->lock);
45433 kfree(op);
45434 nobufs:
45435- fscache_stat(&fscache_n_retrievals_nobufs);
45436+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45437 _leave(" = -ENOBUFS");
45438 return -ENOBUFS;
45439 }
45440@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45441
45442 _enter("%p,%p,,,", cookie, page);
45443
45444- fscache_stat(&fscache_n_allocs);
45445+ fscache_stat_unchecked(&fscache_n_allocs);
45446
45447 if (hlist_empty(&cookie->backing_objects))
45448 goto nobufs;
45449@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45450 goto nobufs_unlock;
45451 spin_unlock(&cookie->lock);
45452
45453- fscache_stat(&fscache_n_alloc_ops);
45454+ fscache_stat_unchecked(&fscache_n_alloc_ops);
45455
45456 ret = fscache_wait_for_retrieval_activation(
45457 object, op,
45458@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45459
45460 error:
45461 if (ret == -ERESTARTSYS)
45462- fscache_stat(&fscache_n_allocs_intr);
45463+ fscache_stat_unchecked(&fscache_n_allocs_intr);
45464 else if (ret < 0)
45465- fscache_stat(&fscache_n_allocs_nobufs);
45466+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45467 else
45468- fscache_stat(&fscache_n_allocs_ok);
45469+ fscache_stat_unchecked(&fscache_n_allocs_ok);
45470
45471 fscache_put_retrieval(op);
45472 _leave(" = %d", ret);
45473@@ -625,7 +625,7 @@ nobufs_unlock:
45474 spin_unlock(&cookie->lock);
45475 kfree(op);
45476 nobufs:
45477- fscache_stat(&fscache_n_allocs_nobufs);
45478+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45479 _leave(" = -ENOBUFS");
45480 return -ENOBUFS;
45481 }
45482@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45483
45484 spin_lock(&cookie->stores_lock);
45485
45486- fscache_stat(&fscache_n_store_calls);
45487+ fscache_stat_unchecked(&fscache_n_store_calls);
45488
45489 /* find a page to store */
45490 page = NULL;
45491@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45492 page = results[0];
45493 _debug("gang %d [%lx]", n, page->index);
45494 if (page->index > op->store_limit) {
45495- fscache_stat(&fscache_n_store_pages_over_limit);
45496+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45497 goto superseded;
45498 }
45499
45500@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45501 	spin_unlock(&cookie->stores_lock);
45502 spin_unlock(&object->lock);
45503
45504- fscache_stat(&fscache_n_store_pages);
45505+ fscache_stat_unchecked(&fscache_n_store_pages);
45506 fscache_stat(&fscache_n_cop_write_page);
45507 ret = object->cache->ops->write_page(op, page);
45508 fscache_stat_d(&fscache_n_cop_write_page);
45509@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45510 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45511 ASSERT(PageFsCache(page));
45512
45513- fscache_stat(&fscache_n_stores);
45514+ fscache_stat_unchecked(&fscache_n_stores);
45515
45516 op = kzalloc(sizeof(*op), GFP_NOIO);
45517 if (!op)
45518@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45519 spin_unlock(&cookie->stores_lock);
45520 spin_unlock(&object->lock);
45521
45522- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45523+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45524 op->store_limit = object->store_limit;
45525
45526 if (fscache_submit_op(object, &op->op) < 0)
45527@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45528
45529 spin_unlock(&cookie->lock);
45530 radix_tree_preload_end();
45531- fscache_stat(&fscache_n_store_ops);
45532- fscache_stat(&fscache_n_stores_ok);
45533+ fscache_stat_unchecked(&fscache_n_store_ops);
45534+ fscache_stat_unchecked(&fscache_n_stores_ok);
45535
45536 /* the work queue now carries its own ref on the object */
45537 fscache_put_operation(&op->op);
45538@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45539 return 0;
45540
45541 already_queued:
45542- fscache_stat(&fscache_n_stores_again);
45543+ fscache_stat_unchecked(&fscache_n_stores_again);
45544 already_pending:
45545 spin_unlock(&cookie->stores_lock);
45546 spin_unlock(&object->lock);
45547 spin_unlock(&cookie->lock);
45548 radix_tree_preload_end();
45549 kfree(op);
45550- fscache_stat(&fscache_n_stores_ok);
45551+ fscache_stat_unchecked(&fscache_n_stores_ok);
45552 _leave(" = 0");
45553 return 0;
45554
45555@@ -851,14 +851,14 @@ nobufs:
45556 spin_unlock(&cookie->lock);
45557 radix_tree_preload_end();
45558 kfree(op);
45559- fscache_stat(&fscache_n_stores_nobufs);
45560+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
45561 _leave(" = -ENOBUFS");
45562 return -ENOBUFS;
45563
45564 nomem_free:
45565 kfree(op);
45566 nomem:
45567- fscache_stat(&fscache_n_stores_oom);
45568+ fscache_stat_unchecked(&fscache_n_stores_oom);
45569 _leave(" = -ENOMEM");
45570 return -ENOMEM;
45571 }
45572@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45573 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45574 ASSERTCMP(page, !=, NULL);
45575
45576- fscache_stat(&fscache_n_uncaches);
45577+ fscache_stat_unchecked(&fscache_n_uncaches);
45578
45579 /* cache withdrawal may beat us to it */
45580 if (!PageFsCache(page))
45581@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45582 unsigned long loop;
45583
45584 #ifdef CONFIG_FSCACHE_STATS
45585- atomic_add(pagevec->nr, &fscache_n_marks);
45586+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45587 #endif
45588
45589 for (loop = 0; loop < pagevec->nr; loop++) {
45590diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45591index 4765190..2a067f2 100644
45592--- a/fs/fscache/stats.c
45593+++ b/fs/fscache/stats.c
45594@@ -18,95 +18,95 @@
45595 /*
45596 * operation counters
45597 */
45598-atomic_t fscache_n_op_pend;
45599-atomic_t fscache_n_op_run;
45600-atomic_t fscache_n_op_enqueue;
45601-atomic_t fscache_n_op_requeue;
45602-atomic_t fscache_n_op_deferred_release;
45603-atomic_t fscache_n_op_release;
45604-atomic_t fscache_n_op_gc;
45605-atomic_t fscache_n_op_cancelled;
45606-atomic_t fscache_n_op_rejected;
45607+atomic_unchecked_t fscache_n_op_pend;
45608+atomic_unchecked_t fscache_n_op_run;
45609+atomic_unchecked_t fscache_n_op_enqueue;
45610+atomic_unchecked_t fscache_n_op_requeue;
45611+atomic_unchecked_t fscache_n_op_deferred_release;
45612+atomic_unchecked_t fscache_n_op_release;
45613+atomic_unchecked_t fscache_n_op_gc;
45614+atomic_unchecked_t fscache_n_op_cancelled;
45615+atomic_unchecked_t fscache_n_op_rejected;
45616
45617-atomic_t fscache_n_attr_changed;
45618-atomic_t fscache_n_attr_changed_ok;
45619-atomic_t fscache_n_attr_changed_nobufs;
45620-atomic_t fscache_n_attr_changed_nomem;
45621-atomic_t fscache_n_attr_changed_calls;
45622+atomic_unchecked_t fscache_n_attr_changed;
45623+atomic_unchecked_t fscache_n_attr_changed_ok;
45624+atomic_unchecked_t fscache_n_attr_changed_nobufs;
45625+atomic_unchecked_t fscache_n_attr_changed_nomem;
45626+atomic_unchecked_t fscache_n_attr_changed_calls;
45627
45628-atomic_t fscache_n_allocs;
45629-atomic_t fscache_n_allocs_ok;
45630-atomic_t fscache_n_allocs_wait;
45631-atomic_t fscache_n_allocs_nobufs;
45632-atomic_t fscache_n_allocs_intr;
45633-atomic_t fscache_n_allocs_object_dead;
45634-atomic_t fscache_n_alloc_ops;
45635-atomic_t fscache_n_alloc_op_waits;
45636+atomic_unchecked_t fscache_n_allocs;
45637+atomic_unchecked_t fscache_n_allocs_ok;
45638+atomic_unchecked_t fscache_n_allocs_wait;
45639+atomic_unchecked_t fscache_n_allocs_nobufs;
45640+atomic_unchecked_t fscache_n_allocs_intr;
45641+atomic_unchecked_t fscache_n_allocs_object_dead;
45642+atomic_unchecked_t fscache_n_alloc_ops;
45643+atomic_unchecked_t fscache_n_alloc_op_waits;
45644
45645-atomic_t fscache_n_retrievals;
45646-atomic_t fscache_n_retrievals_ok;
45647-atomic_t fscache_n_retrievals_wait;
45648-atomic_t fscache_n_retrievals_nodata;
45649-atomic_t fscache_n_retrievals_nobufs;
45650-atomic_t fscache_n_retrievals_intr;
45651-atomic_t fscache_n_retrievals_nomem;
45652-atomic_t fscache_n_retrievals_object_dead;
45653-atomic_t fscache_n_retrieval_ops;
45654-atomic_t fscache_n_retrieval_op_waits;
45655+atomic_unchecked_t fscache_n_retrievals;
45656+atomic_unchecked_t fscache_n_retrievals_ok;
45657+atomic_unchecked_t fscache_n_retrievals_wait;
45658+atomic_unchecked_t fscache_n_retrievals_nodata;
45659+atomic_unchecked_t fscache_n_retrievals_nobufs;
45660+atomic_unchecked_t fscache_n_retrievals_intr;
45661+atomic_unchecked_t fscache_n_retrievals_nomem;
45662+atomic_unchecked_t fscache_n_retrievals_object_dead;
45663+atomic_unchecked_t fscache_n_retrieval_ops;
45664+atomic_unchecked_t fscache_n_retrieval_op_waits;
45665
45666-atomic_t fscache_n_stores;
45667-atomic_t fscache_n_stores_ok;
45668-atomic_t fscache_n_stores_again;
45669-atomic_t fscache_n_stores_nobufs;
45670-atomic_t fscache_n_stores_oom;
45671-atomic_t fscache_n_store_ops;
45672-atomic_t fscache_n_store_calls;
45673-atomic_t fscache_n_store_pages;
45674-atomic_t fscache_n_store_radix_deletes;
45675-atomic_t fscache_n_store_pages_over_limit;
45676+atomic_unchecked_t fscache_n_stores;
45677+atomic_unchecked_t fscache_n_stores_ok;
45678+atomic_unchecked_t fscache_n_stores_again;
45679+atomic_unchecked_t fscache_n_stores_nobufs;
45680+atomic_unchecked_t fscache_n_stores_oom;
45681+atomic_unchecked_t fscache_n_store_ops;
45682+atomic_unchecked_t fscache_n_store_calls;
45683+atomic_unchecked_t fscache_n_store_pages;
45684+atomic_unchecked_t fscache_n_store_radix_deletes;
45685+atomic_unchecked_t fscache_n_store_pages_over_limit;
45686
45687-atomic_t fscache_n_store_vmscan_not_storing;
45688-atomic_t fscache_n_store_vmscan_gone;
45689-atomic_t fscache_n_store_vmscan_busy;
45690-atomic_t fscache_n_store_vmscan_cancelled;
45691+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45692+atomic_unchecked_t fscache_n_store_vmscan_gone;
45693+atomic_unchecked_t fscache_n_store_vmscan_busy;
45694+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45695
45696-atomic_t fscache_n_marks;
45697-atomic_t fscache_n_uncaches;
45698+atomic_unchecked_t fscache_n_marks;
45699+atomic_unchecked_t fscache_n_uncaches;
45700
45701-atomic_t fscache_n_acquires;
45702-atomic_t fscache_n_acquires_null;
45703-atomic_t fscache_n_acquires_no_cache;
45704-atomic_t fscache_n_acquires_ok;
45705-atomic_t fscache_n_acquires_nobufs;
45706-atomic_t fscache_n_acquires_oom;
45707+atomic_unchecked_t fscache_n_acquires;
45708+atomic_unchecked_t fscache_n_acquires_null;
45709+atomic_unchecked_t fscache_n_acquires_no_cache;
45710+atomic_unchecked_t fscache_n_acquires_ok;
45711+atomic_unchecked_t fscache_n_acquires_nobufs;
45712+atomic_unchecked_t fscache_n_acquires_oom;
45713
45714-atomic_t fscache_n_updates;
45715-atomic_t fscache_n_updates_null;
45716-atomic_t fscache_n_updates_run;
45717+atomic_unchecked_t fscache_n_updates;
45718+atomic_unchecked_t fscache_n_updates_null;
45719+atomic_unchecked_t fscache_n_updates_run;
45720
45721-atomic_t fscache_n_relinquishes;
45722-atomic_t fscache_n_relinquishes_null;
45723-atomic_t fscache_n_relinquishes_waitcrt;
45724-atomic_t fscache_n_relinquishes_retire;
45725+atomic_unchecked_t fscache_n_relinquishes;
45726+atomic_unchecked_t fscache_n_relinquishes_null;
45727+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45728+atomic_unchecked_t fscache_n_relinquishes_retire;
45729
45730-atomic_t fscache_n_cookie_index;
45731-atomic_t fscache_n_cookie_data;
45732-atomic_t fscache_n_cookie_special;
45733+atomic_unchecked_t fscache_n_cookie_index;
45734+atomic_unchecked_t fscache_n_cookie_data;
45735+atomic_unchecked_t fscache_n_cookie_special;
45736
45737-atomic_t fscache_n_object_alloc;
45738-atomic_t fscache_n_object_no_alloc;
45739-atomic_t fscache_n_object_lookups;
45740-atomic_t fscache_n_object_lookups_negative;
45741-atomic_t fscache_n_object_lookups_positive;
45742-atomic_t fscache_n_object_lookups_timed_out;
45743-atomic_t fscache_n_object_created;
45744-atomic_t fscache_n_object_avail;
45745-atomic_t fscache_n_object_dead;
45746+atomic_unchecked_t fscache_n_object_alloc;
45747+atomic_unchecked_t fscache_n_object_no_alloc;
45748+atomic_unchecked_t fscache_n_object_lookups;
45749+atomic_unchecked_t fscache_n_object_lookups_negative;
45750+atomic_unchecked_t fscache_n_object_lookups_positive;
45751+atomic_unchecked_t fscache_n_object_lookups_timed_out;
45752+atomic_unchecked_t fscache_n_object_created;
45753+atomic_unchecked_t fscache_n_object_avail;
45754+atomic_unchecked_t fscache_n_object_dead;
45755
45756-atomic_t fscache_n_checkaux_none;
45757-atomic_t fscache_n_checkaux_okay;
45758-atomic_t fscache_n_checkaux_update;
45759-atomic_t fscache_n_checkaux_obsolete;
45760+atomic_unchecked_t fscache_n_checkaux_none;
45761+atomic_unchecked_t fscache_n_checkaux_okay;
45762+atomic_unchecked_t fscache_n_checkaux_update;
45763+atomic_unchecked_t fscache_n_checkaux_obsolete;
45764
45765 atomic_t fscache_n_cop_alloc_object;
45766 atomic_t fscache_n_cop_lookup_object;
45767@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45768 seq_puts(m, "FS-Cache statistics\n");
45769
45770 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45771- atomic_read(&fscache_n_cookie_index),
45772- atomic_read(&fscache_n_cookie_data),
45773- atomic_read(&fscache_n_cookie_special));
45774+ atomic_read_unchecked(&fscache_n_cookie_index),
45775+ atomic_read_unchecked(&fscache_n_cookie_data),
45776+ atomic_read_unchecked(&fscache_n_cookie_special));
45777
45778 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45779- atomic_read(&fscache_n_object_alloc),
45780- atomic_read(&fscache_n_object_no_alloc),
45781- atomic_read(&fscache_n_object_avail),
45782- atomic_read(&fscache_n_object_dead));
45783+ atomic_read_unchecked(&fscache_n_object_alloc),
45784+ atomic_read_unchecked(&fscache_n_object_no_alloc),
45785+ atomic_read_unchecked(&fscache_n_object_avail),
45786+ atomic_read_unchecked(&fscache_n_object_dead));
45787 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45788- atomic_read(&fscache_n_checkaux_none),
45789- atomic_read(&fscache_n_checkaux_okay),
45790- atomic_read(&fscache_n_checkaux_update),
45791- atomic_read(&fscache_n_checkaux_obsolete));
45792+ atomic_read_unchecked(&fscache_n_checkaux_none),
45793+ atomic_read_unchecked(&fscache_n_checkaux_okay),
45794+ atomic_read_unchecked(&fscache_n_checkaux_update),
45795+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45796
45797 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45798- atomic_read(&fscache_n_marks),
45799- atomic_read(&fscache_n_uncaches));
45800+ atomic_read_unchecked(&fscache_n_marks),
45801+ atomic_read_unchecked(&fscache_n_uncaches));
45802
45803 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45804 " oom=%u\n",
45805- atomic_read(&fscache_n_acquires),
45806- atomic_read(&fscache_n_acquires_null),
45807- atomic_read(&fscache_n_acquires_no_cache),
45808- atomic_read(&fscache_n_acquires_ok),
45809- atomic_read(&fscache_n_acquires_nobufs),
45810- atomic_read(&fscache_n_acquires_oom));
45811+ atomic_read_unchecked(&fscache_n_acquires),
45812+ atomic_read_unchecked(&fscache_n_acquires_null),
45813+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
45814+ atomic_read_unchecked(&fscache_n_acquires_ok),
45815+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
45816+ atomic_read_unchecked(&fscache_n_acquires_oom));
45817
45818 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45819- atomic_read(&fscache_n_object_lookups),
45820- atomic_read(&fscache_n_object_lookups_negative),
45821- atomic_read(&fscache_n_object_lookups_positive),
45822- atomic_read(&fscache_n_object_created),
45823- atomic_read(&fscache_n_object_lookups_timed_out));
45824+ atomic_read_unchecked(&fscache_n_object_lookups),
45825+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
45826+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
45827+ atomic_read_unchecked(&fscache_n_object_created),
45828+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45829
45830 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45831- atomic_read(&fscache_n_updates),
45832- atomic_read(&fscache_n_updates_null),
45833- atomic_read(&fscache_n_updates_run));
45834+ atomic_read_unchecked(&fscache_n_updates),
45835+ atomic_read_unchecked(&fscache_n_updates_null),
45836+ atomic_read_unchecked(&fscache_n_updates_run));
45837
45838 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45839- atomic_read(&fscache_n_relinquishes),
45840- atomic_read(&fscache_n_relinquishes_null),
45841- atomic_read(&fscache_n_relinquishes_waitcrt),
45842- atomic_read(&fscache_n_relinquishes_retire));
45843+ atomic_read_unchecked(&fscache_n_relinquishes),
45844+ atomic_read_unchecked(&fscache_n_relinquishes_null),
45845+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45846+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
45847
45848 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45849- atomic_read(&fscache_n_attr_changed),
45850- atomic_read(&fscache_n_attr_changed_ok),
45851- atomic_read(&fscache_n_attr_changed_nobufs),
45852- atomic_read(&fscache_n_attr_changed_nomem),
45853- atomic_read(&fscache_n_attr_changed_calls));
45854+ atomic_read_unchecked(&fscache_n_attr_changed),
45855+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
45856+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45857+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45858+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
45859
45860 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45861- atomic_read(&fscache_n_allocs),
45862- atomic_read(&fscache_n_allocs_ok),
45863- atomic_read(&fscache_n_allocs_wait),
45864- atomic_read(&fscache_n_allocs_nobufs),
45865- atomic_read(&fscache_n_allocs_intr));
45866+ atomic_read_unchecked(&fscache_n_allocs),
45867+ atomic_read_unchecked(&fscache_n_allocs_ok),
45868+ atomic_read_unchecked(&fscache_n_allocs_wait),
45869+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
45870+ atomic_read_unchecked(&fscache_n_allocs_intr));
45871 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45872- atomic_read(&fscache_n_alloc_ops),
45873- atomic_read(&fscache_n_alloc_op_waits),
45874- atomic_read(&fscache_n_allocs_object_dead));
45875+ atomic_read_unchecked(&fscache_n_alloc_ops),
45876+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
45877+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
45878
45879 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45880 " int=%u oom=%u\n",
45881- atomic_read(&fscache_n_retrievals),
45882- atomic_read(&fscache_n_retrievals_ok),
45883- atomic_read(&fscache_n_retrievals_wait),
45884- atomic_read(&fscache_n_retrievals_nodata),
45885- atomic_read(&fscache_n_retrievals_nobufs),
45886- atomic_read(&fscache_n_retrievals_intr),
45887- atomic_read(&fscache_n_retrievals_nomem));
45888+ atomic_read_unchecked(&fscache_n_retrievals),
45889+ atomic_read_unchecked(&fscache_n_retrievals_ok),
45890+ atomic_read_unchecked(&fscache_n_retrievals_wait),
45891+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
45892+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45893+ atomic_read_unchecked(&fscache_n_retrievals_intr),
45894+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
45895 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45896- atomic_read(&fscache_n_retrieval_ops),
45897- atomic_read(&fscache_n_retrieval_op_waits),
45898- atomic_read(&fscache_n_retrievals_object_dead));
45899+ atomic_read_unchecked(&fscache_n_retrieval_ops),
45900+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45901+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45902
45903 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45904- atomic_read(&fscache_n_stores),
45905- atomic_read(&fscache_n_stores_ok),
45906- atomic_read(&fscache_n_stores_again),
45907- atomic_read(&fscache_n_stores_nobufs),
45908- atomic_read(&fscache_n_stores_oom));
45909+ atomic_read_unchecked(&fscache_n_stores),
45910+ atomic_read_unchecked(&fscache_n_stores_ok),
45911+ atomic_read_unchecked(&fscache_n_stores_again),
45912+ atomic_read_unchecked(&fscache_n_stores_nobufs),
45913+ atomic_read_unchecked(&fscache_n_stores_oom));
45914 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45915- atomic_read(&fscache_n_store_ops),
45916- atomic_read(&fscache_n_store_calls),
45917- atomic_read(&fscache_n_store_pages),
45918- atomic_read(&fscache_n_store_radix_deletes),
45919- atomic_read(&fscache_n_store_pages_over_limit));
45920+ atomic_read_unchecked(&fscache_n_store_ops),
45921+ atomic_read_unchecked(&fscache_n_store_calls),
45922+ atomic_read_unchecked(&fscache_n_store_pages),
45923+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
45924+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45925
45926 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45927- atomic_read(&fscache_n_store_vmscan_not_storing),
45928- atomic_read(&fscache_n_store_vmscan_gone),
45929- atomic_read(&fscache_n_store_vmscan_busy),
45930- atomic_read(&fscache_n_store_vmscan_cancelled));
45931+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45932+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45933+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45934+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45935
45936 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45937- atomic_read(&fscache_n_op_pend),
45938- atomic_read(&fscache_n_op_run),
45939- atomic_read(&fscache_n_op_enqueue),
45940- atomic_read(&fscache_n_op_cancelled),
45941- atomic_read(&fscache_n_op_rejected));
45942+ atomic_read_unchecked(&fscache_n_op_pend),
45943+ atomic_read_unchecked(&fscache_n_op_run),
45944+ atomic_read_unchecked(&fscache_n_op_enqueue),
45945+ atomic_read_unchecked(&fscache_n_op_cancelled),
45946+ atomic_read_unchecked(&fscache_n_op_rejected));
45947 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45948- atomic_read(&fscache_n_op_deferred_release),
45949- atomic_read(&fscache_n_op_release),
45950- atomic_read(&fscache_n_op_gc));
45951+ atomic_read_unchecked(&fscache_n_op_deferred_release),
45952+ atomic_read_unchecked(&fscache_n_op_release),
45953+ atomic_read_unchecked(&fscache_n_op_gc));
45954
45955 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45956 atomic_read(&fscache_n_cop_alloc_object),
45957diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45958index b6cca47..ec782c3 100644
45959--- a/fs/fuse/cuse.c
45960+++ b/fs/fuse/cuse.c
45961@@ -586,10 +586,12 @@ static int __init cuse_init(void)
45962 INIT_LIST_HEAD(&cuse_conntbl[i]);
45963
45964 	/* inherit and extend fuse_dev_operations */
45965- cuse_channel_fops = fuse_dev_operations;
45966- cuse_channel_fops.owner = THIS_MODULE;
45967- cuse_channel_fops.open = cuse_channel_open;
45968- cuse_channel_fops.release = cuse_channel_release;
45969+ pax_open_kernel();
45970+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45971+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45972+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
45973+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
45974+ pax_close_kernel();
45975
45976 cuse_class = class_create(THIS_MODULE, "cuse");
45977 if (IS_ERR(cuse_class))
45978diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45979index 5cb8614..6865b11 100644
45980--- a/fs/fuse/dev.c
45981+++ b/fs/fuse/dev.c
45982@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45983 ret = 0;
45984 pipe_lock(pipe);
45985
45986- if (!pipe->readers) {
45987+ if (!atomic_read(&pipe->readers)) {
45988 send_sig(SIGPIPE, current, 0);
45989 if (!ret)
45990 ret = -EPIPE;
45991diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45992index 9f63e49..d8a64c0 100644
45993--- a/fs/fuse/dir.c
45994+++ b/fs/fuse/dir.c
45995@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
45996 return link;
45997 }
45998
45999-static void free_link(char *link)
46000+static void free_link(const char *link)
46001 {
46002 if (!IS_ERR(link))
46003 free_page((unsigned long) link);
46004diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46005index 900cf98..3896726 100644
46006--- a/fs/gfs2/inode.c
46007+++ b/fs/gfs2/inode.c
46008@@ -1517,7 +1517,7 @@ out:
46009
46010 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46011 {
46012- char *s = nd_get_link(nd);
46013+ const char *s = nd_get_link(nd);
46014 if (!IS_ERR(s))
46015 kfree(s);
46016 }
46017diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
46018index 3ebc437..eb23952 100644
46019--- a/fs/hfs/btree.c
46020+++ b/fs/hfs/btree.c
46021@@ -46,11 +46,27 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
46022 case HFS_EXT_CNID:
46023 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
46024 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
46025+
46026+ if (HFS_I(tree->inode)->alloc_blocks >
46027+ HFS_I(tree->inode)->first_blocks) {
46028+ printk(KERN_ERR "hfs: invalid btree extent records\n");
46029+ unlock_new_inode(tree->inode);
46030+ goto free_inode;
46031+ }
46032+
46033 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
46034 break;
46035 case HFS_CAT_CNID:
46036 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
46037 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
46038+
46039+ if (!HFS_I(tree->inode)->first_blocks) {
46040+ printk(KERN_ERR "hfs: invalid btree extent records "
46041+ "(0 size).\n");
46042+ unlock_new_inode(tree->inode);
46043+ goto free_inode;
46044+ }
46045+
46046 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
46047 break;
46048 default:
46049@@ -59,11 +75,6 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
46050 }
46051 unlock_new_inode(tree->inode);
46052
46053- if (!HFS_I(tree->inode)->first_blocks) {
46054- printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
46055- goto free_inode;
46056- }
46057-
46058 mapping = tree->inode->i_mapping;
46059 page = read_mapping_page(mapping, 0, NULL);
46060 if (IS_ERR(page))
46061diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
46062index 4dfbfec..947c9c2 100644
46063--- a/fs/hfsplus/catalog.c
46064+++ b/fs/hfsplus/catalog.c
46065@@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
46066 int err;
46067 u16 type;
46068
46069+ pax_track_stack();
46070+
46071 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
46072 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
46073 if (err)
46074@@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
46075 int entry_size;
46076 int err;
46077
46078+ pax_track_stack();
46079+
46080 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
46081 str->name, cnid, inode->i_nlink);
46082 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
46083@@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
46084 	int entry_size, type;
46085 	int err;
46086
46087+ pax_track_stack();
46088+
46089 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
46090 cnid, src_dir->i_ino, src_name->name,
46091 dst_dir->i_ino, dst_name->name);
46092diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
46093index 25b2443..09a3341 100644
46094--- a/fs/hfsplus/dir.c
46095+++ b/fs/hfsplus/dir.c
46096@@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
46097 struct hfsplus_readdir_data *rd;
46098 u16 type;
46099
46100+ pax_track_stack();
46101+
46102 if (filp->f_pos >= inode->i_size)
46103 return 0;
46104
46105diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
46106index 4cc1e3a..ad0f70b 100644
46107--- a/fs/hfsplus/inode.c
46108+++ b/fs/hfsplus/inode.c
46109@@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
46110 int res = 0;
46111 u16 type;
46112
46113+ pax_track_stack();
46114+
46115 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
46116
46117 HFSPLUS_I(inode)->linkid = 0;
46118@@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
46119 struct hfs_find_data fd;
46120 hfsplus_cat_entry entry;
46121
46122+ pax_track_stack();
46123+
46124 if (HFSPLUS_IS_RSRC(inode))
46125 main_inode = HFSPLUS_I(inode)->rsrc_inode;
46126
46127diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
46128index fbaa669..c548cd0 100644
46129--- a/fs/hfsplus/ioctl.c
46130+++ b/fs/hfsplus/ioctl.c
46131@@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
46132 struct hfsplus_cat_file *file;
46133 int res;
46134
46135+ pax_track_stack();
46136+
46137 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46138 return -EOPNOTSUPP;
46139
46140@@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
46141 struct hfsplus_cat_file *file;
46142 ssize_t res = 0;
46143
46144+ pax_track_stack();
46145+
46146 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46147 return -EOPNOTSUPP;
46148
46149diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
46150index d24a9b6..dd9b3dd 100644
46151--- a/fs/hfsplus/super.c
46152+++ b/fs/hfsplus/super.c
46153@@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
46154 	u64 last_fs_block, last_fs_page;
46155 int err;
46156
46157+ pax_track_stack();
46158+
46159 err = -EINVAL;
46160 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
46161 if (!sbi)
46162diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46163index ec88953..cb5e98e 100644
46164--- a/fs/hugetlbfs/inode.c
46165+++ b/fs/hugetlbfs/inode.c
46166@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46167 .kill_sb = kill_litter_super,
46168 };
46169
46170-static struct vfsmount *hugetlbfs_vfsmount;
46171+struct vfsmount *hugetlbfs_vfsmount;
46172
46173 static int can_do_hugetlb_shm(void)
46174 {
46175diff --git a/fs/inode.c b/fs/inode.c
46176index ec79246..054c36a 100644
46177--- a/fs/inode.c
46178+++ b/fs/inode.c
46179@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
46180
46181 #ifdef CONFIG_SMP
46182 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46183- static atomic_t shared_last_ino;
46184- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46185+ static atomic_unchecked_t shared_last_ino;
46186+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46187
46188 res = next - LAST_INO_BATCH;
46189 }
46190diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
46191index f94fc48..3bb8d30 100644
46192--- a/fs/jbd/checkpoint.c
46193+++ b/fs/jbd/checkpoint.c
46194@@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal)
46195 tid_t this_tid;
46196 int result;
46197
46198+ pax_track_stack();
46199+
46200 jbd_debug(1, "Start checkpoint\n");
46201
46202 /*
46203diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
46204index 16a5047..88ff6ca 100644
46205--- a/fs/jffs2/compr_rtime.c
46206+++ b/fs/jffs2/compr_rtime.c
46207@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
46208 int outpos = 0;
46209 int pos=0;
46210
46211+ pax_track_stack();
46212+
46213 memset(positions,0,sizeof(positions));
46214
46215 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
46216@@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
46217 int outpos = 0;
46218 int pos=0;
46219
46220+ pax_track_stack();
46221+
46222 memset(positions,0,sizeof(positions));
46223
46224 while (outpos<destlen) {
46225diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
46226index 9e7cec8..4713089 100644
46227--- a/fs/jffs2/compr_rubin.c
46228+++ b/fs/jffs2/compr_rubin.c
46229@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
46230 int ret;
46231 uint32_t mysrclen, mydstlen;
46232
46233+ pax_track_stack();
46234+
46235 mysrclen = *sourcelen;
46236 mydstlen = *dstlen - 8;
46237
46238diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46239index e513f19..2ab1351 100644
46240--- a/fs/jffs2/erase.c
46241+++ b/fs/jffs2/erase.c
46242@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46243 struct jffs2_unknown_node marker = {
46244 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46245 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46246- .totlen = cpu_to_je32(c->cleanmarker_size)
46247+ .totlen = cpu_to_je32(c->cleanmarker_size),
46248+ .hdr_crc = cpu_to_je32(0)
46249 };
46250
46251 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46252diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46253index 4515bea..178f2d6 100644
46254--- a/fs/jffs2/wbuf.c
46255+++ b/fs/jffs2/wbuf.c
46256@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46257 {
46258 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46259 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46260- .totlen = constant_cpu_to_je32(8)
46261+ .totlen = constant_cpu_to_je32(8),
46262+ .hdr_crc = constant_cpu_to_je32(0)
46263 };
46264
46265 /*
46266diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
46267index 3e93cdd..c8a80e1 100644
46268--- a/fs/jffs2/xattr.c
46269+++ b/fs/jffs2/xattr.c
46270@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
46271
46272 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
46273
46274+ pax_track_stack();
46275+
46276 /* Phase.1 : Merge same xref */
46277 for (i=0; i < XREF_TMPHASH_SIZE; i++)
46278 xref_tmphash[i] = NULL;
46279diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46280index 06c8a67..589dbbd 100644
46281--- a/fs/jfs/super.c
46282+++ b/fs/jfs/super.c
46283@@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
46284
46285 jfs_inode_cachep =
46286 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46287- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46288+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46289 init_once);
46290 if (jfs_inode_cachep == NULL)
46291 return -ENOMEM;
46292diff --git a/fs/libfs.c b/fs/libfs.c
46293index c18e9a1..0b04e2c 100644
46294--- a/fs/libfs.c
46295+++ b/fs/libfs.c
46296@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46297
46298 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46299 struct dentry *next;
46300+ char d_name[sizeof(next->d_iname)];
46301+ const unsigned char *name;
46302+
46303 next = list_entry(p, struct dentry, d_u.d_child);
46304 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46305 if (!simple_positive(next)) {
46306@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46307
46308 spin_unlock(&next->d_lock);
46309 spin_unlock(&dentry->d_lock);
46310- if (filldir(dirent, next->d_name.name,
46311+ name = next->d_name.name;
46312+ if (name == next->d_iname) {
46313+ memcpy(d_name, name, next->d_name.len);
46314+				name = d_name;
46315+			}
46316+ if (filldir(dirent, name,
46317 next->d_name.len, filp->f_pos,
46318 next->d_inode->i_ino,
46319 dt_type(next->d_inode)) < 0)
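
In the dcache_readdir hunk above, the name handed to filldir() is first copied into the local d_name[] buffer whenever it points at the dentry's inline d_iname array: once the per-dentry spinlocks are dropped, a concurrent rename can rewrite that inline storage in place, while names long enough to live in a separate allocation remain stable. A stripped-down userspace sketch of the same "snapshot inline names before unlocking" idea (the struct and field names are invented for the example, not taken from the kernel):

#include <stdio.h>
#include <string.h>

#define INLINE_NAME_MAX 32   /* stand-in for the dentry's small inline array */

struct toy_dentry {
    char        inline_name[INLINE_NAME_MAX];
    const char *name;   /* points at inline_name or at a stable external string */
    size_t      len;
};

/* Return a pointer that stays valid after the caller drops its lock:
 * inline storage is copied into the caller's buffer, external names pass through. */
static const char *stable_name(const struct toy_dentry *d, char buf[INLINE_NAME_MAX])
{
    const char *name = d->name;

    if (name == d->inline_name) {
        memcpy(buf, name, d->len);
        buf[d->len] = '\0';
        return buf;
    }
    return name;
}

int main(void)
{
    struct toy_dentry d = { .inline_name = "short", .len = 5 };
    char buf[INLINE_NAME_MAX];

    d.name = d.inline_name;
    printf("%s\n", stable_name(&d, buf));
    return 0;
}
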
fe2de317
MT
46320diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46321index 8392cb8..ae8ed40 100644
46322--- a/fs/lockd/clntproc.c
46323+++ b/fs/lockd/clntproc.c
46324@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
8308f9c9
MT
46325 /*
46326 * Cookie counter for NLM requests
46327 */
46328-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46329+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46330
46331 void nlmclnt_next_cookie(struct nlm_cookie *c)
46332 {
46333- u32 cookie = atomic_inc_return(&nlm_cookie);
46334+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46335
46336 memcpy(c->data, &cookie, 4);
46337 c->len=4;
fe2de317 46338@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
66a7e928
MT
46339 struct nlm_rqst reqst, *req;
46340 int status;
46341
46342+ pax_track_stack();
46343+
46344 req = &reqst;
46345 memset(req, 0, sizeof(*req));
46346 locks_init_lock(&req->a_args.lock.fl);
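
The nlm_cookie change above is one of many conversions in this patch from atomic_t to atomic_unchecked_t: under the PaX REFCOUNT hardening, ordinary atomic_t increments are trapped on overflow, so counters that are genuinely allowed to wrap (cookies, generation numbers, statistics) are switched to the unchecked variant. A rough userspace analogue with C11 atomics, only to put the two intents side by side (the function names are invented for the sketch; REFCOUNT itself works at the instruction level, not like this):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Wrap-tolerant counter: overflow is expected and harmless (cookies, stats). */
static unsigned next_cookie(atomic_uint *c)
{
    return atomic_fetch_add(c, 1) + 1;     /* wrapping past UINT_MAX is fine here */
}

/* Overflow-checked counter: wrapping would be a bug (reference counts);
 * this refuses the increment, roughly the property the REFCOUNT trap enforces. */
static int checked_inc(atomic_uint *c)
{
    unsigned old = atomic_load(c);

    do {
        if (old == UINT_MAX)
            return -1;                      /* would overflow: refuse */
    } while (!atomic_compare_exchange_weak(c, &old, old + 1));
    return 0;
}

int main(void)
{
    atomic_uint cookie = ATOMIC_VAR_INIT(0x1234);
    atomic_uint refs   = ATOMIC_VAR_INIT(UINT_MAX);

    printf("next cookie: %#x\n", next_cookie(&cookie));
    printf("checked_inc at UINT_MAX: %d\n", checked_inc(&refs));
    return 0;
}

The same atomic_unchecked_t substitution appears further down for fsnotify_sync_cookie, nfs_attr_generation_counter and the ocfs2 allocation statistics.
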
fe2de317
MT
46347diff --git a/fs/locks.c b/fs/locks.c
46348index 703f545..150a552 100644
46349--- a/fs/locks.c
46350+++ b/fs/locks.c
46351@@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *filp)
58c5fc13
MT
46352 return;
46353
46354 if (filp->f_op && filp->f_op->flock) {
46355- struct file_lock fl = {
46356+ struct file_lock flock = {
46357 .fl_pid = current->tgid,
46358 .fl_file = filp,
46359 .fl_flags = FL_FLOCK,
46360 .fl_type = F_UNLCK,
46361 .fl_end = OFFSET_MAX,
46362 };
46363- filp->f_op->flock(filp, F_SETLKW, &fl);
46364- if (fl.fl_ops && fl.fl_ops->fl_release_private)
46365- fl.fl_ops->fl_release_private(&fl);
46366+ filp->f_op->flock(filp, F_SETLKW, &flock);
46367+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
46368+ flock.fl_ops->fl_release_private(&flock);
46369 }
46370
bc901d79 46371 lock_flocks();
fe2de317
MT
46372diff --git a/fs/logfs/super.c b/fs/logfs/super.c
46373index ce03a18..ac8c14f 100644
46374--- a/fs/logfs/super.c
46375+++ b/fs/logfs/super.c
46376@@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super_block *sb)
66a7e928
MT
46377 struct logfs_disk_super _ds1, *ds1 = &_ds1;
46378 int err, valid0, valid1;
46379
46380+ pax_track_stack();
46381+
46382 /* read first superblock */
46383 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
46384 if (err)
fe2de317
MT
46385diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
46386index 3f32bcb..7c82c29 100644
46387--- a/fs/minix/bitmap.c
46388+++ b/fs/minix/bitmap.c
46389@@ -20,10 +20,11 @@ static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
bc901d79 46390
fe2de317
MT
46391 static DEFINE_SPINLOCK(bitmap_lock);
46392
46393-static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits)
46394+static unsigned long count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
46395 {
46396 unsigned i, j, sum = 0;
46397 struct buffer_head *bh;
46398+ unsigned numblocks = minix_blocks_needed(numbits, blocksize);
46399
46400 for (i=0; i<numblocks-1; i++) {
46401 if (!(bh=map[i]))
46402@@ -105,10 +106,12 @@ int minix_new_block(struct inode * inode)
46403 return 0;
46404 }
46405
46406-unsigned long minix_count_free_blocks(struct minix_sb_info *sbi)
46407+unsigned long minix_count_free_blocks(struct super_block *sb)
46408 {
46409- return (count_free(sbi->s_zmap, sbi->s_zmap_blocks,
46410- sbi->s_nzones - sbi->s_firstdatazone + 1)
46411+ struct minix_sb_info *sbi = minix_sb(sb);
46412+ u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
46413+
46414+ return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
46415 << sbi->s_log_zone_size);
46416 }
46417
46418@@ -273,7 +276,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
46419 return inode;
46420 }
46421
46422-unsigned long minix_count_free_inodes(struct minix_sb_info *sbi)
46423+unsigned long minix_count_free_inodes(struct super_block *sb)
46424 {
46425- return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1);
46426+ struct minix_sb_info *sbi = minix_sb(sb);
46427+ u32 bits = sbi->s_ninodes + 1;
46428+
46429+ return count_free(sbi->s_imap, sb->s_blocksize, bits);
46430 }
46431diff --git a/fs/minix/inode.c b/fs/minix/inode.c
46432index e7d23e2..1ed1351 100644
46433--- a/fs/minix/inode.c
46434+++ b/fs/minix/inode.c
46435@@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
46436 else if (sbi->s_mount_state & MINIX_ERROR_FS)
46437 printk("MINIX-fs: mounting file system with errors, "
46438 "running fsck is recommended\n");
46439+
46440+ /* Apparently minix can create filesystems that allocate more blocks for
46441+ * the bitmaps than needed. We simply ignore that, but verify it didn't
46442+ * create one with not enough blocks and bail out if so.
46443+ */
46444+ block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
46445+ if (sbi->s_imap_blocks < block) {
46446+ printk("MINIX-fs: file system does not have enough "
46447+ "imap blocks allocated. Refusing to mount\n");
46448+ goto out_iput;
46449+ }
46450+
46451+ block = minix_blocks_needed(
46452+ (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
46453+ s->s_blocksize);
46454+ if (sbi->s_zmap_blocks < block) {
46455+ printk("MINIX-fs: file system does not have enough "
46456+ "zmap blocks allocated. Refusing to mount.\n");
46457+ goto out_iput;
46458+ }
46459+
46460 return 0;
46461
46462 out_iput:
46463@@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
46464 buf->f_type = sb->s_magic;
46465 buf->f_bsize = sb->s_blocksize;
46466 buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
46467- buf->f_bfree = minix_count_free_blocks(sbi);
46468+ buf->f_bfree = minix_count_free_blocks(sb);
46469 buf->f_bavail = buf->f_bfree;
46470 buf->f_files = sbi->s_ninodes;
46471- buf->f_ffree = minix_count_free_inodes(sbi);
46472+ buf->f_ffree = minix_count_free_inodes(sb);
46473 buf->f_namelen = sbi->s_namelen;
46474 buf->f_fsid.val[0] = (u32)id;
46475 buf->f_fsid.val[1] = (u32)(id >> 32);
46476diff --git a/fs/minix/minix.h b/fs/minix/minix.h
46477index 341e212..6415fe0 100644
46478--- a/fs/minix/minix.h
46479+++ b/fs/minix/minix.h
46480@@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru
46481 extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
46482 extern struct inode * minix_new_inode(const struct inode *, int, int *);
46483 extern void minix_free_inode(struct inode * inode);
46484-extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi);
46485+extern unsigned long minix_count_free_inodes(struct super_block *sb);
46486 extern int minix_new_block(struct inode * inode);
46487 extern void minix_free_block(struct inode *inode, unsigned long block);
46488-extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi);
46489+extern unsigned long minix_count_free_blocks(struct super_block *sb);
46490 extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
46491 extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
46492
46493@@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
46494 return list_entry(inode, struct minix_inode_info, vfs_inode);
46495 }
46496
46497+static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
46498+{
46499+ return DIV_ROUND_UP(bits, blocksize * 8);
46500+}
46501+
46502 #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
46503 defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
46504
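
The minix changes above pivot on one computation: a bitmap covering N objects needs ceil(N / (blocksize * 8)) blocks, which is what the new minix_blocks_needed() helper returns and what the mount path now compares against s_imap_blocks and s_zmap_blocks before trusting the on-disk layout. A minimal userspace sketch of the same arithmetic, assuming an illustrative 1 KiB block size and inode count (the constants are made up, not taken from any particular filesystem image):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Number of bitmap blocks needed to hold one bit per object. */
static unsigned blocks_needed(unsigned bits, unsigned blocksize)
{
    return DIV_ROUND_UP(bits, blocksize * 8);
}

int main(void)
{
    unsigned blocksize = 1024;          /* bytes per block (illustrative)      */
    unsigned ninodes   = 21845;         /* objects tracked by the inode bitmap */
    unsigned imap_blocks_on_disk = 2;   /* what the superblock claims          */

    unsigned needed = blocks_needed(ninodes + 1, blocksize);
    printf("need %u imap block(s), superblock provides %u\n",
           needed, imap_blocks_on_disk);

    if (imap_blocks_on_disk < needed)
        printf("refusing to mount: bitmap too small\n");
    return 0;
}

The same bound, computed from s_nzones - (s_firstdatazone + 1), guards the zone map, and count_free() now derives its block count from the bit count instead of trusting the superblock's s_zmap_blocks/s_imap_blocks fields.
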
46505diff --git a/fs/namei.c b/fs/namei.c
46506index 3d15072..c1ddf9c 100644
46507--- a/fs/namei.c
46508+++ b/fs/namei.c
46509@@ -281,16 +281,32 @@ int generic_permission(struct inode *inode, int mask)
46510 if (ret != -EACCES)
46511 return ret;
46512
46513+#ifdef CONFIG_GRKERNSEC
46514+ /* we'll block if we have to log due to a denied capability use */
46515+ if (mask & MAY_NOT_BLOCK)
46516+ return -ECHILD;
46517+#endif
46518+
6e9df6a3
MT
46519 if (S_ISDIR(inode->i_mode)) {
46520 /* DACs are overridable for directories */
46521- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46522- return 0;
46523 if (!(mask & MAY_WRITE))
fe2de317
MT
46524- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46525+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46526+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
6e9df6a3
MT
46527 return 0;
46528+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46529+ return 0;
46530 return -EACCES;
46531 }
bc901d79 46532 /*
66a7e928 46533+ * Searching includes executable on directories, else just read.
6e9df6a3 46534+ */
66a7e928 46535+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
6e9df6a3 46536+ if (mask == MAY_READ)
fe2de317
MT
46537+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46538+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
6e9df6a3
MT
46539+ return 0;
46540+
46541+ /*
46542 * Read/write DACs are always overridable.
46543 * Executable DACs are overridable when there is
46544 * at least one exec bit set.
fe2de317 46545@@ -299,14 +315,6 @@ int generic_permission(struct inode *inode, int mask)
6e9df6a3 46546 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
bc901d79
MT
46547 return 0;
46548
6e9df6a3 46549- /*
66a7e928 46550- * Searching includes executable on directories, else just read.
6e9df6a3 46551- */
66a7e928 46552- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
6e9df6a3 46553- if (mask == MAY_READ)
66a7e928 46554- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
6e9df6a3
MT
46555- return 0;
46556-
bc901d79
MT
46557 return -EACCES;
46558 }
bc901d79 46559
fe2de317 46560@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
66a7e928
MT
46561 return error;
46562 }
46563
46564+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
46565+ dentry->d_inode, dentry, nd->path.mnt)) {
46566+ error = -EACCES;
46567+ *p = ERR_PTR(error); /* no ->put_link(), please */
46568+ path_put(&nd->path);
46569+ return error;
46570+ }
46571+
46572 nd->last_type = LAST_BIND;
df50ba0c
MT
46573 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46574 error = PTR_ERR(*p);
46575 if (!IS_ERR(*p)) {
58c5fc13
MT
46576- char *s = nd_get_link(nd);
46577+ const char *s = nd_get_link(nd);
46578 error = 0;
46579 if (s)
46580 error = __vfs_follow_link(nd, s);
fe2de317 46581@@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
6e9df6a3
MT
46582 if (!err)
46583 err = complete_walk(nd);
46584
fe2de317
MT
46585+ if (!(nd->flags & LOOKUP_PARENT)) {
46586+#ifdef CONFIG_GRKERNSEC
46587+ if (flags & LOOKUP_RCU) {
46588+ if (!err)
46589+ path_put(&nd->path);
46590+ err = -ECHILD;
46591+ } else
46592+#endif
46593+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46594+ if (!err)
46595+ path_put(&nd->path);
46596+ err = -ENOENT;
46597+ }
6e9df6a3
MT
46598+ }
46599+
46600 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46601 if (!nd->inode->i_op->lookup) {
46602 path_put(&nd->path);
fe2de317 46603@@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
66a7e928 46604 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
16454cff
MT
46605
46606 if (likely(!retval)) {
fe2de317
MT
46607+ if (*name != '/' && nd->path.dentry && nd->inode) {
46608+#ifdef CONFIG_GRKERNSEC
46609+ if (flags & LOOKUP_RCU)
46610+ return -ECHILD;
46611+#endif
46612+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46613+ return -ENOENT;
46614+ }
16454cff
MT
46615+
46616 if (unlikely(!audit_dummy_context())) {
46617 if (nd->path.dentry && nd->inode)
46618 audit_inode(name, nd->path.dentry);
fe2de317 46619@@ -2049,7 +2089,27 @@ static int may_open(struct path *path, int acc_mode, int flag)
bc901d79
MT
46620 /*
46621 * Ensure there are no outstanding leases on the file.
46622 */
46623- return break_lease(inode, flag);
46624+ error = break_lease(inode, flag);
16454cff 46625+
bc901d79
MT
46626+ if (error)
46627+ return error;
46628+
46629+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
46630+ error = -EPERM;
46631+ goto exit;
46632+ }
46633+
46634+ if (gr_handle_rawio(inode)) {
46635+ error = -EPERM;
46636+ goto exit;
46637+ }
46638+
6e9df6a3 46639+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
bc901d79
MT
46640+ error = -EACCES;
46641+ goto exit;
46642+ }
46643+exit:
46644+ return error;
46645 }
46646
16454cff 46647 static int handle_truncate(struct file *filp)
fe2de317 46648@@ -2110,6 +2170,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
6e9df6a3
MT
46649 error = complete_walk(nd);
46650 if (error)
46651 return ERR_PTR(error);
fe2de317
MT
46652+#ifdef CONFIG_GRKERNSEC
46653+ if (nd->flags & LOOKUP_RCU) {
46654+ error = -ECHILD;
46655+ goto exit;
46656+ }
46657+#endif
6e9df6a3
MT
46658+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46659+ error = -ENOENT;
46660+ goto exit;
46661+ }
46662 audit_inode(pathname, nd->path.dentry);
46663 if (open_flag & O_CREAT) {
46664 error = -EISDIR;
fe2de317 46665@@ -2120,6 +2190,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
6e9df6a3
MT
46666 error = complete_walk(nd);
46667 if (error)
46668 return ERR_PTR(error);
fe2de317
MT
46669+#ifdef CONFIG_GRKERNSEC
46670+ if (nd->flags & LOOKUP_RCU) {
46671+ error = -ECHILD;
46672+ goto exit;
46673+ }
46674+#endif
6e9df6a3
MT
46675+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46676+ error = -ENOENT;
46677+ goto exit;
46678+ }
46679 audit_inode(pathname, dir);
46680 goto ok;
46681 }
fe2de317
MT
46682@@ -2141,6 +2221,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46683 error = complete_walk(nd);
6e9df6a3
MT
46684 if (error)
46685 return ERR_PTR(-ECHILD);
fe2de317
MT
46686+#ifdef CONFIG_GRKERNSEC
46687+ if (nd->flags & LOOKUP_RCU) {
46688+ error = -ECHILD;
46689+ goto exit;
46690+ }
46691+#endif
6e9df6a3
MT
46692+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46693+ error = -ENOENT;
46694+ goto exit;
46695+ }
fe2de317 46696
6e9df6a3
MT
46697 error = -ENOTDIR;
46698 if (nd->flags & LOOKUP_DIRECTORY) {
fe2de317 46699@@ -2181,6 +2271,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
66a7e928
MT
46700 /* Negative dentry, just create the file */
46701 if (!dentry->d_inode) {
46702 int mode = op->mode;
46703+
6e9df6a3 46704+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
66a7e928
MT
46705+ error = -EACCES;
46706+ goto exit_mutex_unlock;
46707+ }
46708+
46709 if (!IS_POSIXACL(dir->d_inode))
46710 mode &= ~current_umask();
46711 /*
fe2de317 46712@@ -2204,6 +2300,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
66a7e928
MT
46713 error = vfs_create(dir->d_inode, dentry, mode, nd);
46714 if (error)
46715 goto exit_mutex_unlock;
46716+ else
46717+ gr_handle_create(path->dentry, path->mnt);
46718 mutex_unlock(&dir->d_inode->i_mutex);
46719 dput(nd->path.dentry);
46720 nd->path.dentry = dentry;
fe2de317 46721@@ -2213,6 +2311,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
58c5fc13
MT
46722 /*
46723 * It already exists.
46724 */
46725+
6e9df6a3
MT
46726+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46727+ error = -ENOENT;
46728+ goto exit_mutex_unlock;
46729+ }
46730+
bc901d79
MT
46731+ /* only check if O_CREAT is specified, all other checks need to go
46732+ into may_open */
6e9df6a3 46733+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
58c5fc13
MT
46734+ error = -EACCES;
46735+ goto exit_mutex_unlock;
46736+ }
46737+
46738 mutex_unlock(&dir->d_inode->i_mutex);
df50ba0c 46739 audit_inode(pathname, path->dentry);
58c5fc13 46740
fe2de317 46741@@ -2425,6 +2536,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
6e9df6a3
MT
46742 *path = nd.path;
46743 return dentry;
46744 eexist:
46745+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46746+ dput(dentry);
46747+ dentry = ERR_PTR(-ENOENT);
46748+ goto fail;
46749+ }
46750 dput(dentry);
46751 dentry = ERR_PTR(-EEXIST);
46752 fail:
fe2de317 46753@@ -2447,6 +2563,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
6e9df6a3
MT
46754 }
46755 EXPORT_SYMBOL(user_path_create);
46756
46757+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46758+{
46759+ char *tmp = getname(pathname);
46760+ struct dentry *res;
46761+ if (IS_ERR(tmp))
46762+ return ERR_CAST(tmp);
46763+ res = kern_path_create(dfd, tmp, path, is_dir);
46764+ if (IS_ERR(res))
46765+ putname(tmp);
46766+ else
46767+ *to = tmp;
46768+ return res;
46769+}
46770+
46771 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
46772 {
46773 int error = may_create(dir, dentry);
fe2de317 46774@@ -2514,6 +2644,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
6e9df6a3 46775 error = mnt_want_write(path.mnt);
58c5fc13
MT
46776 if (error)
46777 goto out_dput;
46778+
6e9df6a3 46779+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
58c5fc13 46780+ error = -EPERM;
6e9df6a3 46781+ goto out_drop_write;
58c5fc13
MT
46782+ }
46783+
6e9df6a3 46784+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
58c5fc13 46785+ error = -EACCES;
6e9df6a3 46786+ goto out_drop_write;
58c5fc13
MT
46787+ }
46788+
6e9df6a3 46789 error = security_path_mknod(&path, dentry, mode, dev);
58c5fc13 46790 if (error)
6e9df6a3 46791 goto out_drop_write;
fe2de317 46792@@ -2531,6 +2672,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
58c5fc13
MT
46793 }
46794 out_drop_write:
6e9df6a3 46795 mnt_drop_write(path.mnt);
58c5fc13
MT
46796+
46797+ if (!error)
6e9df6a3 46798+ gr_handle_create(dentry, path.mnt);
58c5fc13
MT
46799 out_dput:
46800 dput(dentry);
6e9df6a3 46801 mutex_unlock(&path.dentry->d_inode->i_mutex);
fe2de317 46802@@ -2580,12 +2724,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
6e9df6a3
MT
46803 error = mnt_want_write(path.mnt);
46804 if (error)
46805 goto out_dput;
46806+
46807+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
58c5fc13 46808+ error = -EACCES;
6e9df6a3 46809+ goto out_drop_write;
58c5fc13
MT
46810+ }
46811+
6e9df6a3
MT
46812 error = security_path_mkdir(&path, dentry, mode);
46813 if (error)
46814 goto out_drop_write;
46815 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
58c5fc13 46816 out_drop_write:
6e9df6a3 46817 mnt_drop_write(path.mnt);
58c5fc13
MT
46818+
46819+ if (!error)
6e9df6a3 46820+ gr_handle_create(dentry, path.mnt);
58c5fc13
MT
46821 out_dput:
46822 dput(dentry);
6e9df6a3 46823 mutex_unlock(&path.dentry->d_inode->i_mutex);
fe2de317 46824@@ -2665,6 +2818,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
58c5fc13
MT
46825 char * name;
46826 struct dentry *dentry;
46827 struct nameidata nd;
46828+ ino_t saved_ino = 0;
46829+ dev_t saved_dev = 0;
46830
46831 error = user_path_parent(dfd, pathname, &nd, &name);
46832 if (error)
fe2de317 46833@@ -2693,6 +2848,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
15a11c5b
MT
46834 error = -ENOENT;
46835 goto exit3;
46836 }
58c5fc13 46837+
6e9df6a3
MT
46838+ saved_ino = dentry->d_inode->i_ino;
46839+ saved_dev = gr_get_dev_from_dentry(dentry);
58c5fc13 46840+
15a11c5b
MT
46841+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46842+ error = -EACCES;
46843+ goto exit3;
58c5fc13
MT
46844+ }
46845+
46846 error = mnt_want_write(nd.path.mnt);
46847 if (error)
46848 goto exit3;
fe2de317 46849@@ -2700,6 +2864,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
58c5fc13
MT
46850 if (error)
46851 goto exit4;
46852 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46853+ if (!error && (saved_dev || saved_ino))
46854+ gr_handle_delete(saved_ino, saved_dev);
46855 exit4:
46856 mnt_drop_write(nd.path.mnt);
46857 exit3:
fe2de317 46858@@ -2762,6 +2928,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
58c5fc13
MT
46859 struct dentry *dentry;
46860 struct nameidata nd;
46861 struct inode *inode = NULL;
46862+ ino_t saved_ino = 0;
46863+ dev_t saved_dev = 0;
46864
46865 error = user_path_parent(dfd, pathname, &nd, &name);
46866 if (error)
fe2de317 46867@@ -2784,6 +2952,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
15a11c5b 46868 if (!inode)
58c5fc13 46869 goto slashes;
15a11c5b
MT
46870 ihold(inode);
46871+
46872+ if (inode->i_nlink <= 1) {
46873+ saved_ino = inode->i_ino;
46874+ saved_dev = gr_get_dev_from_dentry(dentry);
46875+ }
46876+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46877+ error = -EACCES;
46878+ goto exit2;
58c5fc13 46879+ }
15a11c5b 46880+
58c5fc13
MT
46881 error = mnt_want_write(nd.path.mnt);
46882 if (error)
46883 goto exit2;
fe2de317 46884@@ -2791,6 +2969,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
58c5fc13
MT
46885 if (error)
46886 goto exit3;
46887 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46888+ if (!error && (saved_ino || saved_dev))
46889+ gr_handle_delete(saved_ino, saved_dev);
46890 exit3:
46891 mnt_drop_write(nd.path.mnt);
46892 exit2:
fe2de317 46893@@ -2866,10 +3046,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
6e9df6a3
MT
46894 error = mnt_want_write(path.mnt);
46895 if (error)
46896 goto out_dput;
46897+
46898+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
58c5fc13 46899+ error = -EACCES;
6e9df6a3 46900+ goto out_drop_write;
58c5fc13
MT
46901+ }
46902+
6e9df6a3 46903 error = security_path_symlink(&path, dentry, from);
58c5fc13
MT
46904 if (error)
46905 goto out_drop_write;
6e9df6a3 46906 error = vfs_symlink(path.dentry->d_inode, dentry, from);
58c5fc13 46907+ if (!error)
6e9df6a3 46908+ gr_handle_create(dentry, path.mnt);
58c5fc13 46909 out_drop_write:
6e9df6a3 46910 mnt_drop_write(path.mnt);
58c5fc13 46911 out_dput:
fe2de317 46912@@ -2941,6 +3129,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
6e9df6a3
MT
46913 {
46914 struct dentry *new_dentry;
46915 struct path old_path, new_path;
fe2de317 46916+ char *to = NULL;
6e9df6a3
MT
46917 int how = 0;
46918 int error;
46919
fe2de317 46920@@ -2964,7 +3153,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
6e9df6a3
MT
46921 if (error)
46922 return error;
46923
46924- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46925+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
58c5fc13
MT
46926 error = PTR_ERR(new_dentry);
46927 if (IS_ERR(new_dentry))
6e9df6a3 46928 goto out;
fe2de317 46929@@ -2975,13 +3164,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
6e9df6a3
MT
46930 error = mnt_want_write(new_path.mnt);
46931 if (error)
46932 goto out_dput;
58c5fc13
MT
46933+
46934+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46935+ old_path.dentry->d_inode,
46936+ old_path.dentry->d_inode->i_mode, to)) {
46937+ error = -EACCES;
6e9df6a3 46938+ goto out_drop_write;
58c5fc13
MT
46939+ }
46940+
6e9df6a3 46941+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
58c5fc13
MT
46942+ old_path.dentry, old_path.mnt, to)) {
46943+ error = -EACCES;
6e9df6a3 46944+ goto out_drop_write;
58c5fc13
MT
46945+ }
46946+
6e9df6a3 46947 error = security_path_link(old_path.dentry, &new_path, new_dentry);
58c5fc13
MT
46948 if (error)
46949 goto out_drop_write;
6e9df6a3 46950 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
58c5fc13 46951+ if (!error)
6e9df6a3 46952+ gr_handle_create(new_dentry, new_path.mnt);
58c5fc13 46953 out_drop_write:
6e9df6a3 46954 mnt_drop_write(new_path.mnt);
58c5fc13 46955 out_dput:
6e9df6a3
MT
46956+ putname(to);
46957 dput(new_dentry);
46958 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46959 path_put(&new_path);
fe2de317 46960@@ -3153,6 +3359,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
66a7e928
MT
46961 char *to;
46962 int error;
46963
46964+ pax_track_stack();
46965+
46966 error = user_path_parent(olddfd, oldname, &oldnd, &from);
46967 if (error)
46968 goto exit;
fe2de317 46969@@ -3209,6 +3417,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
58c5fc13
MT
46970 if (new_dentry == trap)
46971 goto exit5;
46972
46973+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46974+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
46975+ to);
46976+ if (error)
46977+ goto exit5;
46978+
46979 error = mnt_want_write(oldnd.path.mnt);
46980 if (error)
46981 goto exit5;
fe2de317 46982@@ -3218,6 +3432,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
58c5fc13
MT
46983 goto exit6;
46984 error = vfs_rename(old_dir->d_inode, old_dentry,
46985 new_dir->d_inode, new_dentry);
46986+ if (!error)
46987+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46988+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46989 exit6:
46990 mnt_drop_write(oldnd.path.mnt);
46991 exit5:
fe2de317 46992@@ -3243,6 +3460,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
71d190be
MT
46993
46994 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46995 {
46996+ char tmpbuf[64];
46997+ const char *newlink;
46998 int len;
46999
47000 len = PTR_ERR(link);
fe2de317 47001@@ -3252,7 +3471,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
71d190be
MT
47002 len = strlen(link);
47003 if (len > (unsigned) buflen)
47004 len = buflen;
47005- if (copy_to_user(buffer, link, len))
47006+
47007+ if (len < sizeof(tmpbuf)) {
47008+ memcpy(tmpbuf, link, len);
47009+ newlink = tmpbuf;
47010+ } else
47011+ newlink = link;
47012+
47013+ if (copy_to_user(buffer, newlink, len))
47014 len = -EFAULT;
47015 out:
47016 return len;
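
vfs_readlink() above gains a 64-byte tmpbuf: link targets shorter than that are staged in the stack buffer and copy_to_user() then reads from a small local object of known size, rather than from whatever allocation the filesystem handed back, which is presumably meant to keep the hardened usercopy source checks simple for the common short-symlink case. A userspace sketch of the same bounce-buffer shape (the 64-byte threshold mirrors the hunk; export_bytes() is an invented stand-in for copy_to_user()):

#include <stdio.h>
#include <string.h>

/* Stand-in for an "export to an untrusted sink" primitive such as copy_to_user(). */
static int export_bytes(char *dst, const char *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

static int readlink_copy(char *dst, size_t buflen, const char *link)
{
    char tmpbuf[64];
    const char *src = link;
    size_t len = strlen(link);

    if (len > buflen)
        len = buflen;

    /* Short targets are bounced through a fixed stack buffer so the export
     * reads from a small local object instead of the original allocation. */
    if (len < sizeof(tmpbuf)) {
        memcpy(tmpbuf, link, len);
        src = tmpbuf;
    }
    return export_bytes(dst, src, len) ? -1 : (int)len;
}

int main(void)
{
    char out[128];
    int n = readlink_copy(out, sizeof(out), "/an/illustrative/target");

    printf("%.*s (%d bytes)\n", n, out, n);
    return 0;
}
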
fe2de317
MT
47017diff --git a/fs/namespace.c b/fs/namespace.c
47018index e5e1c7d..019609e 100644
47019--- a/fs/namespace.c
47020+++ b/fs/namespace.c
47021@@ -1329,6 +1329,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
58c5fc13
MT
47022 if (!(sb->s_flags & MS_RDONLY))
47023 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47024 up_write(&sb->s_umount);
47025+
47026+ gr_log_remount(mnt->mnt_devname, retval);
47027+
47028 return retval;
47029 }
47030
fe2de317 47031@@ -1348,6 +1351,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
6892158b 47032 br_write_unlock(vfsmount_lock);
58c5fc13
MT
47033 up_write(&namespace_sem);
47034 release_mounts(&umount_list);
47035+
47036+ gr_log_unmount(mnt->mnt_devname, retval);
47037+
47038 return retval;
47039 }
47040
fe2de317 47041@@ -2339,6 +2345,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
ae4e228f
MT
47042 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47043 MS_STRICTATIME);
58c5fc13 47044
ae4e228f
MT
47045+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47046+ retval = -EPERM;
47047+ goto dput_out;
47048+ }
47049+
58c5fc13
MT
47050+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47051+ retval = -EPERM;
47052+ goto dput_out;
47053+ }
47054+
47055 if (flags & MS_REMOUNT)
47056 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47057 data_page);
fe2de317 47058@@ -2353,6 +2369,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
58c5fc13
MT
47059 dev_name, data_page);
47060 dput_out:
47061 path_put(&path);
47062+
47063+ gr_log_mount(dev_name, dir_name, retval);
47064+
47065 return retval;
47066 }
47067
fe2de317 47068@@ -2576,6 +2595,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
66a7e928
MT
47069 if (error)
47070 goto out2;
58c5fc13
MT
47071
47072+ if (gr_handle_chroot_pivot()) {
47073+ error = -EPERM;
66a7e928 47074+ goto out2;
58c5fc13
MT
47075+ }
47076+
6892158b 47077 get_fs_root(current->fs, &root);
66a7e928
MT
47078 error = lock_mount(&old);
47079 if (error)
fe2de317
MT
47080diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
47081index 9c51f62..503b252 100644
47082--- a/fs/ncpfs/dir.c
47083+++ b/fs/ncpfs/dir.c
47084@@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
66a7e928
MT
47085 int res, val = 0, len;
47086 __u8 __name[NCP_MAXPATHLEN + 1];
47087
47088+ pax_track_stack();
47089+
47090 if (dentry == dentry->d_sb->s_root)
47091 return 1;
47092
fe2de317 47093@@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
66a7e928
MT
47094 int error, res, len;
47095 __u8 __name[NCP_MAXPATHLEN + 1];
47096
47097+ pax_track_stack();
47098+
47099 error = -EIO;
47100 if (!ncp_conn_valid(server))
47101 goto finished;
fe2de317 47102@@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
66a7e928
MT
47103 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
47104 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
47105
47106+ pax_track_stack();
47107+
47108 ncp_age_dentry(server, dentry);
47109 len = sizeof(__name);
47110 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
fe2de317 47111@@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
66a7e928
MT
47112 int error, len;
47113 __u8 __name[NCP_MAXPATHLEN + 1];
47114
47115+ pax_track_stack();
47116+
47117 DPRINTK("ncp_mkdir: making %s/%s\n",
47118 dentry->d_parent->d_name.name, dentry->d_name.name);
47119
fe2de317 47120@@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
66a7e928
MT
47121 int old_len, new_len;
47122 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
47123
47124+ pax_track_stack();
47125+
47126 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
47127 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
47128 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
fe2de317
MT
47129diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
47130index 202f370..9d4565e 100644
47131--- a/fs/ncpfs/inode.c
47132+++ b/fs/ncpfs/inode.c
47133@@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
66a7e928
MT
47134 #endif
47135 struct ncp_entry_info finfo;
47136
47137+ pax_track_stack();
47138+
15a11c5b 47139 memset(&data, 0, sizeof(data));
66a7e928
MT
47140 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
47141 if (!server)
fe2de317
MT
47142diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
47143index 281ae95..dd895b9 100644
47144--- a/fs/nfs/blocklayout/blocklayout.c
47145+++ b/fs/nfs/blocklayout/blocklayout.c
47146@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
6e9df6a3
MT
47147 */
47148 struct parallel_io {
47149 struct kref refcnt;
47150- struct rpc_call_ops call_ops;
47151+ rpc_call_ops_no_const call_ops;
47152 void (*pnfs_callback) (void *data);
47153 void *data;
47154 };
fe2de317
MT
47155diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47156index 679d2f5..ef1ffec 100644
47157--- a/fs/nfs/inode.c
47158+++ b/fs/nfs/inode.c
47159@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
15a11c5b
MT
47160 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47161 nfsi->attrtimeo_timestamp = jiffies;
47162
47163- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47164+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47165 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47166 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47167 else
fe2de317 47168@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
ae4e228f
MT
47169 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47170 }
47171
47172-static atomic_long_t nfs_attr_generation_counter;
47173+static atomic_long_unchecked_t nfs_attr_generation_counter;
47174
47175 static unsigned long nfs_read_attr_generation_counter(void)
47176 {
47177- return atomic_long_read(&nfs_attr_generation_counter);
47178+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47179 }
47180
47181 unsigned long nfs_inc_attr_generation_counter(void)
47182 {
47183- return atomic_long_inc_return(&nfs_attr_generation_counter);
47184+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47185 }
47186
47187 void nfs_fattr_init(struct nfs_fattr *fattr)
fe2de317
MT
47188diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
47189index 6f8bcc7..8f823c5 100644
47190--- a/fs/nfsd/nfs4state.c
47191+++ b/fs/nfsd/nfs4state.c
47192@@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
66a7e928
MT
47193 unsigned int strhashval;
47194 int err;
47195
47196+ pax_track_stack();
47197+
47198 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
47199 (long long) lock->lk_offset,
47200 (long long) lock->lk_length);
fe2de317
MT
47201diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
47202index f810996..cec8977 100644
47203--- a/fs/nfsd/nfs4xdr.c
47204+++ b/fs/nfsd/nfs4xdr.c
47205@@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
66a7e928
MT
47206 .dentry = dentry,
47207 };
47208
47209+ pax_track_stack();
47210+
47211 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
47212 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
47213 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
fe2de317
MT
47214diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47215index acf88ae..4fd6245 100644
47216--- a/fs/nfsd/vfs.c
47217+++ b/fs/nfsd/vfs.c
47218@@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
ae4e228f
MT
47219 } else {
47220 oldfs = get_fs();
47221 set_fs(KERNEL_DS);
47222- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
6e9df6a3 47223+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
ae4e228f
MT
47224 set_fs(oldfs);
47225 }
47226
fe2de317 47227@@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
ae4e228f
MT
47228
47229 /* Write the data. */
47230 oldfs = get_fs(); set_fs(KERNEL_DS);
47231- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
6e9df6a3 47232+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
ae4e228f
MT
47233 set_fs(oldfs);
47234 if (host_err < 0)
47235 goto out_nfserr;
fe2de317 47236@@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
ae4e228f 47237 */
58c5fc13 47238
ae4e228f
MT
47239 oldfs = get_fs(); set_fs(KERNEL_DS);
47240- host_err = inode->i_op->readlink(dentry, buf, *lenp);
6e9df6a3 47241+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
ae4e228f 47242 set_fs(oldfs);
58c5fc13 47243
ae4e228f 47244 if (host_err < 0)
fe2de317
MT
47245diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47246index 9fde1c0..14e8827 100644
47247--- a/fs/notify/fanotify/fanotify_user.c
47248+++ b/fs/notify/fanotify/fanotify_user.c
47249@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
15a11c5b 47250 goto out_close_fd;
66a7e928 47251
15a11c5b
MT
47252 ret = -EFAULT;
47253- if (copy_to_user(buf, &fanotify_event_metadata,
47254+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47255+ copy_to_user(buf, &fanotify_event_metadata,
47256 fanotify_event_metadata.event_len))
47257 goto out_kill_access_response;
47258
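
The fanotify hunk adds a sanity check that the event_len reported inside the metadata never exceeds the size of the structure being copied out, so a bogus self-described length cannot make copy_to_user() read past the local object. The same "validate the embedded length against sizeof before copying" pattern in a small userspace sketch (the struct layout and names are made up for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Toy record that carries its own length, like the fanotify event metadata. */
struct event_metadata {
    uint32_t event_len;   /* how many bytes the receiver should consume */
    uint32_t mask;
    int32_t  fd;
    int32_t  pid;
};

/* Copy the record into a caller buffer, refusing self-described lengths that
 * exceed the object we actually have. */
static int copy_event(char *dst, size_t dst_size, const struct event_metadata *ev)
{
    if (ev->event_len > sizeof(*ev))      /* lying length field: reject */
        return -1;
    if (ev->event_len > dst_size)         /* receiver buffer too small  */
        return -1;
    memcpy(dst, ev, ev->event_len);
    return (int)ev->event_len;
}

int main(void)
{
    struct event_metadata good = { .event_len = sizeof(good), .mask = 1, .fd = 4, .pid = 42 };
    struct event_metadata bad  = good;
    char buf[64];

    bad.event_len = 4096;                 /* would walk far past the struct */

    printf("good copy: %d bytes\n", copy_event(buf, sizeof(buf), &good));
    printf("bad  copy: %d\n", copy_event(buf, sizeof(buf), &bad));
    return 0;
}
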
fe2de317
MT
47259diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47260index ee18815..7aa5d01 100644
47261--- a/fs/notify/notification.c
47262+++ b/fs/notify/notification.c
47263@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
8308f9c9
MT
47264 * get set to 0 so it will never get 'freed'
47265 */
47266 static struct fsnotify_event *q_overflow_event;
47267-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47268+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47269
47270 /**
47271 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
fe2de317 47272@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
8308f9c9
MT
47273 */
47274 u32 fsnotify_get_cookie(void)
47275 {
47276- return atomic_inc_return(&fsnotify_sync_cookie);
47277+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47278 }
47279 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47280
fe2de317
MT
47281diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47282index 99e3610..02c1068 100644
47283--- a/fs/ntfs/dir.c
47284+++ b/fs/ntfs/dir.c
6892158b
MT
47285@@ -1329,7 +1329,7 @@ find_next_index_buffer:
47286 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47287 ~(s64)(ndir->itype.index.block_size - 1)));
47288 /* Bounds checks. */
47289- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47290+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47291 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47292 "inode 0x%lx or driver bug.", vdir->i_ino);
47293 goto err_out;
fe2de317
MT
47294diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47295index c587e2d..3641eaa 100644
47296--- a/fs/ntfs/file.c
47297+++ b/fs/ntfs/file.c
47298@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
58c5fc13
MT
47299 #endif /* NTFS_RW */
47300 };
47301
47302-const struct file_operations ntfs_empty_file_ops = {};
ae4e228f 47303+const struct file_operations ntfs_empty_file_ops __read_only;
58c5fc13
MT
47304
47305-const struct inode_operations ntfs_empty_inode_ops = {};
ae4e228f 47306+const struct inode_operations ntfs_empty_inode_ops __read_only;
fe2de317
MT
47307diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47308index 210c352..a174f83 100644
47309--- a/fs/ocfs2/localalloc.c
47310+++ b/fs/ocfs2/localalloc.c
47311@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
58c5fc13
MT
47312 goto bail;
47313 }
47314
47315- atomic_inc(&osb->alloc_stats.moves);
47316+ atomic_inc_unchecked(&osb->alloc_stats.moves);
47317
58c5fc13 47318 bail:
57199397 47319 if (handle)
fe2de317
MT
47320diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
47321index 53aa41e..d7df9f1 100644
47322--- a/fs/ocfs2/namei.c
47323+++ b/fs/ocfs2/namei.c
47324@@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *old_dir,
66a7e928
MT
47325 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
47326 struct ocfs2_dir_lookup_result target_insert = { NULL, };
47327
47328+ pax_track_stack();
47329+
47330 /* At some point it might be nice to break this function up a
47331 * bit. */
47332
fe2de317
MT
47333diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47334index 4092858..51c70ff 100644
47335--- a/fs/ocfs2/ocfs2.h
47336+++ b/fs/ocfs2/ocfs2.h
66a7e928 47337@@ -235,11 +235,11 @@ enum ocfs2_vol_state
58c5fc13
MT
47338
47339 struct ocfs2_alloc_stats
47340 {
47341- atomic_t moves;
47342- atomic_t local_data;
47343- atomic_t bitmap_data;
47344- atomic_t bg_allocs;
47345- atomic_t bg_extends;
47346+ atomic_unchecked_t moves;
47347+ atomic_unchecked_t local_data;
47348+ atomic_unchecked_t bitmap_data;
47349+ atomic_unchecked_t bg_allocs;
47350+ atomic_unchecked_t bg_extends;
47351 };
47352
47353 enum ocfs2_local_alloc_state
fe2de317
MT
47354diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47355index ba5d97e..c77db25 100644
47356--- a/fs/ocfs2/suballoc.c
47357+++ b/fs/ocfs2/suballoc.c
47358@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
58c5fc13
MT
47359 mlog_errno(status);
47360 goto bail;
47361 }
47362- atomic_inc(&osb->alloc_stats.bg_extends);
47363+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47364
47365 /* You should never ask for this much metadata */
47366 BUG_ON(bits_wanted >
fe2de317 47367@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
58c5fc13
MT
47368 mlog_errno(status);
47369 goto bail;
47370 }
57199397
MT
47371- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47372+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58c5fc13 47373
57199397
MT
47374 *suballoc_loc = res.sr_bg_blkno;
47375 *suballoc_bit_start = res.sr_bit_offset;
fe2de317 47376@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
66a7e928
MT
47377 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47378 res->sr_bits);
47379
47380- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47381+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47382
47383 BUG_ON(res->sr_bits != 1);
47384
fe2de317 47385@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
58c5fc13
MT
47386 mlog_errno(status);
47387 goto bail;
47388 }
57199397
MT
47389- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47390+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58c5fc13 47391
57199397 47392 BUG_ON(res.sr_bits != 1);
58c5fc13 47393
fe2de317 47394@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58c5fc13
MT
47395 cluster_start,
47396 num_clusters);
47397 if (!status)
47398- atomic_inc(&osb->alloc_stats.local_data);
47399+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
47400 } else {
47401 if (min_clusters > (osb->bitmap_cpg - 1)) {
47402 /* The only paths asking for contiguousness
fe2de317 47403@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58c5fc13 47404 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
57199397
MT
47405 res.sr_bg_blkno,
47406 res.sr_bit_offset);
58c5fc13
MT
47407- atomic_inc(&osb->alloc_stats.bitmap_data);
47408+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
57199397 47409 *num_clusters = res.sr_bits;
58c5fc13
MT
47410 }
47411 }
fe2de317
MT
47412diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47413index 56f6102..1433c29 100644
47414--- a/fs/ocfs2/super.c
47415+++ b/fs/ocfs2/super.c
47416@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
58c5fc13
MT
47417 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47418 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47419 "Stats",
47420- atomic_read(&osb->alloc_stats.bitmap_data),
47421- atomic_read(&osb->alloc_stats.local_data),
47422- atomic_read(&osb->alloc_stats.bg_allocs),
47423- atomic_read(&osb->alloc_stats.moves),
47424- atomic_read(&osb->alloc_stats.bg_extends));
47425+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47426+ atomic_read_unchecked(&osb->alloc_stats.local_data),
47427+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47428+ atomic_read_unchecked(&osb->alloc_stats.moves),
47429+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47430
47431 out += snprintf(buf + out, len - out,
47432 "%10s => State: %u Descriptor: %llu Size: %u bits "
fe2de317 47433@@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
58c5fc13 47434 spin_lock_init(&osb->osb_xattr_lock);
df50ba0c 47435 ocfs2_init_steal_slots(osb);
58c5fc13
MT
47436
47437- atomic_set(&osb->alloc_stats.moves, 0);
47438- atomic_set(&osb->alloc_stats.local_data, 0);
47439- atomic_set(&osb->alloc_stats.bitmap_data, 0);
47440- atomic_set(&osb->alloc_stats.bg_allocs, 0);
47441- atomic_set(&osb->alloc_stats.bg_extends, 0);
47442+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47443+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47444+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47445+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47446+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47447
47448 /* Copy the blockcheck stats from the superblock probe */
47449 osb->osb_ecc_stats = *stats;
fe2de317
MT
47450diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47451index 5d22872..523db20 100644
47452--- a/fs/ocfs2/symlink.c
47453+++ b/fs/ocfs2/symlink.c
66a7e928 47454@@ -142,7 +142,7 @@ bail:
58c5fc13 47455
ae4e228f
MT
47456 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47457 {
47458- char *link = nd_get_link(nd);
47459+ const char *link = nd_get_link(nd);
47460 if (!IS_ERR(link))
47461 kfree(link);
58c5fc13 47462 }
fe2de317
MT
47463diff --git a/fs/open.c b/fs/open.c
47464index f711921..28d5958 100644
47465--- a/fs/open.c
47466+++ b/fs/open.c
47467@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
bc901d79
MT
47468 error = locks_verify_truncate(inode, NULL, length);
47469 if (!error)
47470 error = security_path_truncate(&path);
58c5fc13 47471+
bc901d79
MT
47472+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47473+ error = -EACCES;
47474+
47475 if (!error)
47476 error = do_truncate(path.dentry, length, 0, NULL);
47477
fe2de317 47478@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
58c5fc13
MT
47479 if (__mnt_is_readonly(path.mnt))
47480 res = -EROFS;
47481
47482+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47483+ res = -EACCES;
47484+
47485 out_path_release:
47486 path_put(&path);
47487 out:
fe2de317 47488@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
58c5fc13
MT
47489 if (error)
47490 goto dput_and_out;
47491
47492+ gr_log_chdir(path.dentry, path.mnt);
47493+
47494 set_fs_pwd(current->fs, &path);
47495
47496 dput_and_out:
fe2de317 47497@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
58c5fc13
MT
47498 goto out_putf;
47499
6892158b 47500 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
58c5fc13
MT
47501+
47502+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47503+ error = -EPERM;
47504+
47505+ if (!error)
47506+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47507+
47508 if (!error)
47509 set_fs_pwd(current->fs, &file->f_path);
47510 out_putf:
fe2de317 47511@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
ae4e228f 47512 if (error)
58c5fc13
MT
47513 goto dput_and_out;
47514
47515+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47516+ goto dput_and_out;
58c5fc13
MT
47517+
47518 set_fs_root(current->fs, &path);
47519+
47520+ gr_handle_chroot_chdir(&path);
47521+
47522 error = 0;
47523 dput_and_out:
47524 path_put(&path);
fe2de317 47525@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
58c5fc13 47526 if (error)
6e9df6a3 47527 return error;
6892158b 47528 mutex_lock(&inode->i_mutex);
58c5fc13 47529+
6e9df6a3 47530+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
58c5fc13 47531+ error = -EACCES;
6892158b 47532+ goto out_unlock;
58c5fc13 47533+ }
6e9df6a3 47534+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
58c5fc13 47535+ error = -EACCES;
ae4e228f 47536+ goto out_unlock;
58c5fc13
MT
47537+ }
47538+
6e9df6a3
MT
47539 error = security_path_chmod(path->dentry, path->mnt, mode);
47540 if (error)
47541 goto out_unlock;
fe2de317 47542@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
58c5fc13
MT
47543 int error;
47544 struct iattr newattrs;
47545
ae4e228f 47546+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
58c5fc13
MT
47547+ return -EACCES;
47548+
47549 newattrs.ia_valid = ATTR_CTIME;
47550 if (user != (uid_t) -1) {
47551 newattrs.ia_valid |= ATTR_UID;
fe2de317
MT
47552diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
47553index 6296b40..417c00f 100644
47554--- a/fs/partitions/efi.c
47555+++ b/fs/partitions/efi.c
47556@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
47557 if (!gpt)
47558 return NULL;
47559
47560+ if (!le32_to_cpu(gpt->num_partition_entries))
47561+ return NULL;
47562+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
47563+ if (!pte)
47564+ return NULL;
47565+
47566 count = le32_to_cpu(gpt->num_partition_entries) *
47567 le32_to_cpu(gpt->sizeof_partition_entry);
47568- if (!count)
47569- return NULL;
47570- pte = kzalloc(count, GFP_KERNEL);
47571- if (!pte)
47572- return NULL;
47573-
47574 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
47575 (u8 *) pte,
47576 count) < count) {
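
The efi.c hunk above replaces a hand-multiplied kzalloc(count) with kcalloc(nmemb, size), which fails cleanly when the multiplication would overflow, and rejects a zero num_partition_entries up front instead of inferring it from the multiplied byte count. A userspace analogue with calloc(), just to show the overflow behaviour the change relies on (the sizes are deliberately absurd; a corrupt GPT header could supply something similar):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
    /* An element count large enough that count * size wraps around size_t. */
    size_t nmemb = SIZE_MAX / 8 + 1;
    size_t size  = 8;

    /* calloc() on common libcs detects the overflowing multiplication and fails. */
    void *checked = calloc(nmemb, size);
    printf("calloc(nmemb, size)      -> %p\n", checked);

    /* A hand-rolled nmemb * size silently wraps to 0, so malloc() "succeeds"
     * with a buffer far too small for the data later read into it. */
    size_t wrapped = nmemb * size;
    void *unchecked = malloc(wrapped);
    printf("nmemb * size wrapped to %zu bytes -> %p\n", wrapped, unchecked);

    free(checked);
    free(unchecked);
    return 0;
}
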
47577diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
47578index af9fdf0..75b15c3 100644
47579--- a/fs/partitions/ldm.c
47580+++ b/fs/partitions/ldm.c
47581@@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
317566c1 47582 goto found;
c52201e0 47583 }
317566c1
MT
47584
47585- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
47586+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
47587 if (!f) {
47588 ldm_crit ("Out of memory.");
47589 return false;
fe2de317
MT
47590diff --git a/fs/pipe.c b/fs/pipe.c
47591index 0e0be1d..f62a72d 100644
47592--- a/fs/pipe.c
47593+++ b/fs/pipe.c
57199397 47594@@ -420,9 +420,9 @@ redo:
ae4e228f
MT
47595 }
47596 if (bufs) /* More to do? */
47597 continue;
47598- if (!pipe->writers)
47599+ if (!atomic_read(&pipe->writers))
47600 break;
47601- if (!pipe->waiting_writers) {
47602+ if (!atomic_read(&pipe->waiting_writers)) {
47603 /* syscall merging: Usually we must not sleep
47604 * if O_NONBLOCK is set, or if we got some data.
47605 * But if a writer sleeps in kernel space, then
fe2de317 47606@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
ae4e228f
MT
47607 mutex_lock(&inode->i_mutex);
47608 pipe = inode->i_pipe;
47609
47610- if (!pipe->readers) {
47611+ if (!atomic_read(&pipe->readers)) {
47612 send_sig(SIGPIPE, current, 0);
47613 ret = -EPIPE;
47614 goto out;
57199397 47615@@ -530,7 +530,7 @@ redo1:
ae4e228f
MT
47616 for (;;) {
47617 int bufs;
47618
47619- if (!pipe->readers) {
47620+ if (!atomic_read(&pipe->readers)) {
47621 send_sig(SIGPIPE, current, 0);
47622 if (!ret)
47623 ret = -EPIPE;
57199397 47624@@ -616,9 +616,9 @@ redo2:
ae4e228f
MT
47625 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47626 do_wakeup = 0;
47627 }
47628- pipe->waiting_writers++;
47629+ atomic_inc(&pipe->waiting_writers);
47630 pipe_wait(pipe);
47631- pipe->waiting_writers--;
47632+ atomic_dec(&pipe->waiting_writers);
47633 }
47634 out:
47635 mutex_unlock(&inode->i_mutex);
fe2de317 47636@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
ae4e228f
MT
47637 mask = 0;
47638 if (filp->f_mode & FMODE_READ) {
47639 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47640- if (!pipe->writers && filp->f_version != pipe->w_counter)
47641+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47642 mask |= POLLHUP;
47643 }
47644
fe2de317 47645@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
ae4e228f
MT
47646 * Most Unices do not set POLLERR for FIFOs but on Linux they
47647 * behave exactly like pipes for poll().
47648 */
47649- if (!pipe->readers)
47650+ if (!atomic_read(&pipe->readers))
47651 mask |= POLLERR;
47652 }
47653
fe2de317 47654@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
ae4e228f
MT
47655
47656 mutex_lock(&inode->i_mutex);
47657 pipe = inode->i_pipe;
47658- pipe->readers -= decr;
47659- pipe->writers -= decw;
47660+ atomic_sub(decr, &pipe->readers);
47661+ atomic_sub(decw, &pipe->writers);
47662
47663- if (!pipe->readers && !pipe->writers) {
47664+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47665 free_pipe_info(inode);
47666 } else {
16454cff 47667 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
fe2de317 47668@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
ae4e228f
MT
47669
47670 if (inode->i_pipe) {
47671 ret = 0;
47672- inode->i_pipe->readers++;
47673+ atomic_inc(&inode->i_pipe->readers);
47674 }
47675
47676 mutex_unlock(&inode->i_mutex);
fe2de317 47677@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
ae4e228f
MT
47678
47679 if (inode->i_pipe) {
47680 ret = 0;
47681- inode->i_pipe->writers++;
47682+ atomic_inc(&inode->i_pipe->writers);
47683 }
47684
47685 mutex_unlock(&inode->i_mutex);
fe2de317 47686@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
ae4e228f
MT
47687 if (inode->i_pipe) {
47688 ret = 0;
47689 if (filp->f_mode & FMODE_READ)
47690- inode->i_pipe->readers++;
47691+ atomic_inc(&inode->i_pipe->readers);
47692 if (filp->f_mode & FMODE_WRITE)
47693- inode->i_pipe->writers++;
47694+ atomic_inc(&inode->i_pipe->writers);
47695 }
47696
47697 mutex_unlock(&inode->i_mutex);
57199397 47698@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
58c5fc13
MT
47699 inode->i_pipe = NULL;
47700 }
47701
47702-static struct vfsmount *pipe_mnt __read_mostly;
47703+struct vfsmount *pipe_mnt __read_mostly;
ae4e228f
MT
47704
47705 /*
47706 * pipefs_dname() is called from d_path().
fe2de317 47707@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
ae4e228f
MT
47708 goto fail_iput;
47709 inode->i_pipe = pipe;
47710
47711- pipe->readers = pipe->writers = 1;
47712+ atomic_set(&pipe->readers, 1);
47713+ atomic_set(&pipe->writers, 1);
47714 inode->i_fop = &rdwr_pipefifo_fops;
47715
58c5fc13 47716 /*
fe2de317
MT
47717diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47718index 15af622..0e9f4467 100644
47719--- a/fs/proc/Kconfig
47720+++ b/fs/proc/Kconfig
47721@@ -30,12 +30,12 @@ config PROC_FS
47722
47723 config PROC_KCORE
47724 bool "/proc/kcore support" if !ARM
47725- depends on PROC_FS && MMU
47726+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47727
47728 config PROC_VMCORE
47729 bool "/proc/vmcore support"
47730- depends on PROC_FS && CRASH_DUMP
47731- default y
47732+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47733+ default n
47734 help
47735 Exports the dump image of crashed kernel in ELF format.
47736
47737@@ -59,8 +59,8 @@ config PROC_SYSCTL
47738 limited in memory.
47739
47740 config PROC_PAGE_MONITOR
47741- default y
47742- depends on PROC_FS && MMU
47743+ default n
47744+ depends on PROC_FS && MMU && !GRKERNSEC
47745 bool "Enable /proc page monitoring" if EXPERT
47746 help
47747 Various /proc files exist to monitor process memory utilization:
47748diff --git a/fs/proc/array.c b/fs/proc/array.c
47749index 3a1dafd..c7fed72 100644
47750--- a/fs/proc/array.c
47751+++ b/fs/proc/array.c
6892158b
MT
47752@@ -60,6 +60,7 @@
47753 #include <linux/tty.h>
47754 #include <linux/string.h>
47755 #include <linux/mman.h>
47756+#include <linux/grsecurity.h>
47757 #include <linux/proc_fs.h>
47758 #include <linux/ioport.h>
47759 #include <linux/uaccess.h>
fe2de317 47760@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
16454cff 47761 seq_putc(m, '\n');
57199397
MT
47762 }
47763
47764+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47765+static inline void task_pax(struct seq_file *m, struct task_struct *p)
47766+{
47767+ if (p->mm)
47768+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47769+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47770+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47771+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47772+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47773+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47774+ else
47775+ seq_printf(m, "PaX:\t-----\n");
47776+}
47777+#endif
47778+
47779 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47780 struct pid *pid, struct task_struct *task)
47781 {
fe2de317 47782@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
c52201e0
MT
47783 task_cpus_allowed(m, task);
47784 cpuset_task_status_allowed(m, task);
57199397
MT
47785 task_context_switch_counts(m, task);
47786+
47787+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47788+ task_pax(m, task);
47789+#endif
6892158b
MT
47790+
47791+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47792+ task_grsec_rbac(m, task);
47793+#endif
57199397
MT
47794+
47795 return 0;
47796 }
47797
47798+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47799+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47800+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47801+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47802+#endif
47803+
47804 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47805 struct pid *pid, struct task_struct *task, int whole)
47806 {
fe2de317 47807@@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
6e9df6a3 47808 char tcomm[sizeof(task->comm)];
66a7e928
MT
47809 unsigned long flags;
47810
47811+ pax_track_stack();
47812+
47813 state = *get_task_state(task);
47814 vsize = eip = esp = 0;
47815 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
fe2de317 47816@@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
47817 gtime = task->gtime;
47818 }
47819
47820+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47821+ if (PAX_RAND_FLAGS(mm)) {
47822+ eip = 0;
47823+ esp = 0;
47824+ wchan = 0;
47825+ }
47826+#endif
47827+#ifdef CONFIG_GRKERNSEC_HIDESYM
47828+ wchan = 0;
47829+	eip = 0;
47830+	esp = 0;
47831+#endif
47832+
47833 /* scale priority and nice values from timeslices to -20..20 */
47834 /* to make it look like a "normal" Unix priority/nice value */
47835 priority = task_prio(task);
fe2de317 47836@@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
47837 vsize,
47838 mm ? get_mm_rss(mm) : 0,
47839 rsslim,
47840+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
ea610fa8
AF
47841+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
47842+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
57199397
MT
47843+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
47844+#else
ea610fa8
AF
47845 mm ? (permitted ? mm->start_code : 1) : 0,
47846 mm ? (permitted ? mm->end_code : 1) : 0,
57199397
MT
47847 (permitted && mm) ? mm->start_stack : 0,
47848+#endif
47849 esp,
47850 eip,
47851 /* The signal information here is obsolete.
fe2de317 47852@@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
47853
47854 return 0;
47855 }
47856+
47857+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47858+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47859+{
71d190be
MT
47860+ u32 curr_ip = 0;
47861+ unsigned long flags;
47862+
47863+ if (lock_task_sighand(task, &flags)) {
47864+ curr_ip = task->signal->curr_ip;
47865+ unlock_task_sighand(task, &flags);
47866+ }
47867+
47868+ return sprintf(buffer, "%pI4\n", &curr_ip);
57199397
MT
47869+}
47870+#endif
fe2de317
MT
47871diff --git a/fs/proc/base.c b/fs/proc/base.c
47872index 5eb0206..fe01db4 100644
47873--- a/fs/proc/base.c
47874+++ b/fs/proc/base.c
15a11c5b 47875@@ -107,6 +107,22 @@ struct pid_entry {
57199397
MT
47876 union proc_op op;
47877 };
47878
47879+struct getdents_callback {
47880+ struct linux_dirent __user * current_dir;
47881+ struct linux_dirent __user * previous;
47882+ struct file * file;
47883+ int count;
47884+ int error;
47885+};
47886+
47887+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
47888+ loff_t offset, u64 ino, unsigned int d_type)
47889+{
47890+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
47891+ buf->error = -EINVAL;
47892+ return 0;
47893+}
47894+
47895 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47896 .name = (NAME), \
47897 .len = sizeof(NAME) - 1, \
fe2de317 47898@@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task)
57199397 47899 if (task == current)
66a7e928 47900 return mm;
57199397
MT
47901
47902+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
66a7e928 47903+ return ERR_PTR(-EPERM);
57199397
MT
47904+
47905 /*
47906 * If current is actively ptrace'ing, and would also be
47907 * permitted to freshly attach with ptrace now, permit it.
fe2de317 47908@@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
57199397
MT
47909 if (!mm->arg_end)
47910 goto out_mm; /* Shh! No looking before we're done */
47911
47912+ if (gr_acl_handle_procpidmem(task))
47913+ goto out_mm;
47914+
47915 len = mm->arg_end - mm->arg_start;
47916
47917 if (len > PAGE_SIZE)
15a11c5b 47918@@ -309,12 +331,28 @@ out:
57199397
MT
47919 return res;
47920 }
47921
47922+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47923+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47924+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47925+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47926+#endif
47927+
47928 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47929 {
66a7e928
MT
47930 struct mm_struct *mm = mm_for_maps(task);
47931 int res = PTR_ERR(mm);
47932 if (mm && !IS_ERR(mm)) {
57199397
MT
47933 unsigned int nwords = 0;
47934+
47935+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
6892158b
MT
47936+ /* allow if we're currently ptracing this task */
47937+ if (PAX_RAND_FLAGS(mm) &&
47938+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
57199397 47939+ mmput(mm);
15a11c5b 47940+ return 0;
57199397
MT
47941+ }
47942+#endif
47943+
47944 do {
47945 nwords += 2;
47946 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
fe2de317 47947@@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
6892158b
MT
47948 }
47949
47950
47951-#ifdef CONFIG_KALLSYMS
47952+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47953 /*
47954 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47955 * Returns the resolved symbol. If that fails, simply return the address.
fe2de317 47956@@ -367,7 +405,7 @@ static void unlock_trace(struct task_struct *task)
66a7e928 47957 mutex_unlock(&task->signal->cred_guard_mutex);
57199397 47958 }
57199397
MT
47959
47960-#ifdef CONFIG_STACKTRACE
47961+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47962
47963 #define MAX_STACK_TRACE_DEPTH 64
47964
fe2de317 47965@@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
57199397
MT
47966 return count;
47967 }
47968
47969-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47970+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47971 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47972 {
47973 long nr;
fe2de317 47974@@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
16454cff
MT
47975 /************************************************************************/
47976
47977 /* permission checks */
47978-static int proc_fd_access_allowed(struct inode *inode)
47979+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47980 {
47981 struct task_struct *task;
47982 int allowed = 0;
fe2de317 47983@@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct inode *inode)
16454cff
MT
47984 */
47985 task = get_proc_task(inode);
47986 if (task) {
47987- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47988+ if (log)
47989+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
47990+ else
47991+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47992 put_task_struct(task);
47993 }
47994 return allowed;
fe2de317 47995@@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
57199397
MT
47996 if (!task)
47997 goto out_no_task;
47998
47999+ if (gr_acl_handle_procpidmem(task))
48000+ goto out;
48001+
66a7e928
MT
48002 ret = -ENOMEM;
48003 page = (char *)__get_free_page(GFP_TEMPORARY);
48004 if (!page)
fe2de317 48005@@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
16454cff
MT
48006 path_put(&nd->path);
48007
48008 /* Are we allowed to snoop on the tasks file descriptors? */
48009- if (!proc_fd_access_allowed(inode))
48010+ if (!proc_fd_access_allowed(inode,0))
48011 goto out;
48012
48013 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
fe2de317 48014@@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
16454cff
MT
48015 struct path path;
48016
48017 /* Are we allowed to snoop on the tasks file descriptors? */
48018- if (!proc_fd_access_allowed(inode))
48019- goto out;
48020+ /* logging this is needed for learning on chromium to work properly,
48021+ but we don't want to flood the logs from 'ps' which does a readlink
48022+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48023+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
48024+ */
48025+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48026+ if (!proc_fd_access_allowed(inode,0))
48027+ goto out;
48028+ } else {
48029+ if (!proc_fd_access_allowed(inode,1))
48030+ goto out;
48031+ }
48032
48033 error = PROC_I(inode)->op.proc_get_link(inode, &path);
48034 if (error)
fe2de317 48035@@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
57199397
MT
48036 rcu_read_lock();
48037 cred = __task_cred(task);
48038 inode->i_uid = cred->euid;
48039+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48040+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48041+#else
48042 inode->i_gid = cred->egid;
48043+#endif
48044 rcu_read_unlock();
48045 }
48046 security_task_to_inode(task, inode);
fe2de317 48047@@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
57199397
MT
48048 struct inode *inode = dentry->d_inode;
48049 struct task_struct *task;
48050 const struct cred *cred;
48051+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48052+ const struct cred *tmpcred = current_cred();
48053+#endif
48054
48055 generic_fillattr(inode, stat);
48056
fe2de317 48057@@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
57199397
MT
48058 stat->uid = 0;
48059 stat->gid = 0;
48060 task = pid_task(proc_pid(inode), PIDTYPE_PID);
48061+
48062+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
48063+ rcu_read_unlock();
48064+ return -ENOENT;
48065+ }
48066+
48067 if (task) {
48068+ cred = __task_cred(task);
48069+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48070+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48071+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48072+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48073+#endif
15a11c5b 48074+ ) {
57199397
MT
48075+#endif
48076 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48077+#ifdef CONFIG_GRKERNSEC_PROC_USER
48078+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48079+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48080+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48081+#endif
48082 task_dumpable(task)) {
48083- cred = __task_cred(task);
48084 stat->uid = cred->euid;
48085+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48086+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48087+#else
48088 stat->gid = cred->egid;
48089+#endif
48090 }
15a11c5b
MT
48091+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48092+ } else {
48093+ rcu_read_unlock();
48094+ return -ENOENT;
48095+ }
48096+#endif
57199397
MT
48097 }
48098 rcu_read_unlock();
15a11c5b 48099 return 0;
fe2de317 48100@@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
57199397
MT
48101
48102 if (task) {
48103 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48104+#ifdef CONFIG_GRKERNSEC_PROC_USER
48105+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48106+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48107+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48108+#endif
48109 task_dumpable(task)) {
48110 rcu_read_lock();
48111 cred = __task_cred(task);
48112 inode->i_uid = cred->euid;
48113+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48114+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48115+#else
48116 inode->i_gid = cred->egid;
48117+#endif
48118 rcu_read_unlock();
48119 } else {
48120 inode->i_uid = 0;
fe2de317 48121@@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
57199397
MT
48122 int fd = proc_fd(inode);
48123
48124 if (task) {
48125- files = get_files_struct(task);
48126+ if (!gr_acl_handle_procpidmem(task))
48127+ files = get_files_struct(task);
48128 put_task_struct(task);
48129 }
48130 if (files) {
fe2de317 48131@@ -2176,11 +2275,21 @@ static const struct file_operations proc_fd_operations = {
16454cff 48132 */
6e9df6a3 48133 static int proc_fd_permission(struct inode *inode, int mask)
57199397 48134 {
57199397 48135+ struct task_struct *task;
6e9df6a3 48136 int rv = generic_permission(inode, mask);
57199397
MT
48137- if (rv == 0)
48138- return 0;
48139+
48140 if (task_pid(current) == proc_pid(inode))
48141 rv = 0;
48142+
48143+ task = get_proc_task(inode);
48144+ if (task == NULL)
48145+ return rv;
48146+
48147+ if (gr_acl_handle_procpidmem(task))
48148+ rv = -EACCES;
48149+
48150+ put_task_struct(task);
48151+
48152 return rv;
48153 }
48154
fe2de317 48155@@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
57199397
MT
48156 if (!task)
48157 goto out_no_task;
48158
48159+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48160+ goto out;
48161+
48162 /*
48163 * Yes, it does not scale. And it should not. Don't add
48164 * new entries into /proc/<tgid>/ without very good reasons.
fe2de317 48165@@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct file *filp,
57199397
MT
48166 if (!task)
48167 goto out_no_task;
48168
48169+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48170+ goto out;
48171+
48172 ret = 0;
48173 i = filp->f_pos;
48174 switch (i) {
fe2de317 48175@@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
57199397
MT
48176 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48177 void *cookie)
48178 {
48179- char *s = nd_get_link(nd);
48180+ const char *s = nd_get_link(nd);
48181 if (!IS_ERR(s))
48182 __putname(s);
48183 }
fe2de317 48184@@ -2802,7 +2917,7 @@ static const struct pid_entry tgid_base_stuff[] = {
16454cff 48185 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
57199397
MT
48186 #endif
48187 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48188-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48189+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66a7e928 48190 INF("syscall", S_IRUGO, proc_pid_syscall),
57199397
MT
48191 #endif
48192 INF("cmdline", S_IRUGO, proc_pid_cmdline),
fe2de317 48193@@ -2827,10 +2942,10 @@ static const struct pid_entry tgid_base_stuff[] = {
6892158b
MT
48194 #ifdef CONFIG_SECURITY
48195 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48196 #endif
48197-#ifdef CONFIG_KALLSYMS
48198+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57199397
MT
48199 INF("wchan", S_IRUGO, proc_pid_wchan),
48200 #endif
48201-#ifdef CONFIG_STACKTRACE
48202+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66a7e928 48203 ONE("stack", S_IRUGO, proc_pid_stack),
57199397
MT
48204 #endif
48205 #ifdef CONFIG_SCHEDSTATS
fe2de317 48206@@ -2864,6 +2979,9 @@ static const struct pid_entry tgid_base_stuff[] = {
15a11c5b
MT
48207 #ifdef CONFIG_HARDWALL
48208 INF("hardwall", S_IRUGO, proc_pid_hardwall),
57199397
MT
48209 #endif
48210+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48211+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48212+#endif
48213 };
48214
48215 static int proc_tgid_base_readdir(struct file * filp,
fe2de317 48216@@ -2989,7 +3107,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
57199397
MT
48217 if (!inode)
48218 goto out;
48219
48220+#ifdef CONFIG_GRKERNSEC_PROC_USER
48221+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48222+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48223+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48224+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48225+#else
48226 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48227+#endif
48228 inode->i_op = &proc_tgid_base_inode_operations;
48229 inode->i_fop = &proc_tgid_base_operations;
48230 inode->i_flags|=S_IMMUTABLE;
fe2de317 48231@@ -3031,7 +3156,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
57199397
MT
48232 if (!task)
48233 goto out;
48234
48235+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48236+ goto out_put_task;
48237+
48238 result = proc_pid_instantiate(dir, dentry, task, NULL);
48239+out_put_task:
48240 put_task_struct(task);
48241 out:
48242 return result;
fe2de317 48243@@ -3096,6 +3225,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
57199397 48244 {
71d190be
MT
48245 unsigned int nr;
48246 struct task_struct *reaper;
57199397
MT
48247+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48248+ const struct cred *tmpcred = current_cred();
48249+ const struct cred *itercred;
48250+#endif
48251+ filldir_t __filldir = filldir;
48252 struct tgid_iter iter;
48253 struct pid_namespace *ns;
48254
fe2de317 48255@@ -3119,8 +3253,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
57199397
MT
48256 for (iter = next_tgid(ns, iter);
48257 iter.task;
48258 iter.tgid += 1, iter = next_tgid(ns, iter)) {
48259+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48260+ rcu_read_lock();
48261+ itercred = __task_cred(iter.task);
48262+#endif
48263+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
48264+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48265+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
48266+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48267+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48268+#endif
48269+ )
48270+#endif
48271+ )
48272+ __filldir = &gr_fake_filldir;
48273+ else
48274+ __filldir = filldir;
48275+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48276+ rcu_read_unlock();
48277+#endif
48278 filp->f_pos = iter.tgid + TGID_OFFSET;
48279- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
48280+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
48281 put_task_struct(iter.task);
48282 goto out;
48283 }
fe2de317 48284@@ -3148,7 +3301,7 @@ static const struct pid_entry tid_base_stuff[] = {
57199397
MT
48285 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48286 #endif
48287 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48288-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48289+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66a7e928 48290 INF("syscall", S_IRUGO, proc_pid_syscall),
57199397
MT
48291 #endif
48292 INF("cmdline", S_IRUGO, proc_pid_cmdline),
fe2de317 48293@@ -3172,10 +3325,10 @@ static const struct pid_entry tid_base_stuff[] = {
6892158b
MT
48294 #ifdef CONFIG_SECURITY
48295 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48296 #endif
48297-#ifdef CONFIG_KALLSYMS
48298+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57199397
MT
48299 INF("wchan", S_IRUGO, proc_pid_wchan),
48300 #endif
48301-#ifdef CONFIG_STACKTRACE
48302+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66a7e928 48303 ONE("stack", S_IRUGO, proc_pid_stack),
57199397
MT
48304 #endif
48305 #ifdef CONFIG_SCHEDSTATS
fe2de317
MT
48306diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48307index 82676e3..5f8518a 100644
48308--- a/fs/proc/cmdline.c
48309+++ b/fs/proc/cmdline.c
48310@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
57199397
MT
48311
48312 static int __init proc_cmdline_init(void)
48313 {
48314+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48315+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48316+#else
48317 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48318+#endif
48319 return 0;
48320 }
48321 module_init(proc_cmdline_init);
fe2de317
MT
48322diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48323index b143471..bb105e5 100644
48324--- a/fs/proc/devices.c
48325+++ b/fs/proc/devices.c
48326@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
57199397
MT
48327
48328 static int __init proc_devices_init(void)
48329 {
48330+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48331+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48332+#else
48333 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48334+#endif
48335 return 0;
48336 }
48337 module_init(proc_devices_init);
fe2de317
MT
48338diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48339index 7ed72d6..d5f061a 100644
48340--- a/fs/proc/inode.c
48341+++ b/fs/proc/inode.c
6e9df6a3
MT
48342@@ -18,12 +18,18 @@
48343 #include <linux/module.h>
48344 #include <linux/sysctl.h>
48345 #include <linux/slab.h>
48346+#include <linux/grsecurity.h>
48347
48348 #include <asm/system.h>
48349 #include <asm/uaccess.h>
48350
48351 #include "internal.h"
48352
48353+#ifdef CONFIG_PROC_SYSCTL
48354+extern const struct inode_operations proc_sys_inode_operations;
48355+extern const struct inode_operations proc_sys_dir_operations;
48356+#endif
48357+
48358 static void proc_evict_inode(struct inode *inode)
48359 {
48360 struct proc_dir_entry *de;
fe2de317 48361@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
6e9df6a3
MT
48362 ns_ops = PROC_I(inode)->ns_ops;
48363 if (ns_ops && ns_ops->put)
48364 ns_ops->put(PROC_I(inode)->ns);
48365+
48366+#ifdef CONFIG_PROC_SYSCTL
48367+ if (inode->i_op == &proc_sys_inode_operations ||
48368+ inode->i_op == &proc_sys_dir_operations)
48369+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48370+#endif
48371+
48372 }
48373
48374 static struct kmem_cache * proc_inode_cachep;
fe2de317 48375@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
57199397
MT
48376 if (de->mode) {
48377 inode->i_mode = de->mode;
48378 inode->i_uid = de->uid;
48379+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48380+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48381+#else
48382 inode->i_gid = de->gid;
48383+#endif
48384 }
48385 if (de->size)
48386 inode->i_size = de->size;
fe2de317
MT
48387diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48388index 7838e5c..ff92cbc 100644
48389--- a/fs/proc/internal.h
48390+++ b/fs/proc/internal.h
48391@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
48392 struct pid *pid, struct task_struct *task);
48393 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48394 struct pid *pid, struct task_struct *task);
48395+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48396+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48397+#endif
48398 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48399
48400 extern const struct file_operations proc_maps_operations;
fe2de317
MT
48401diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48402index d245cb2..7e645bd 100644
48403--- a/fs/proc/kcore.c
48404+++ b/fs/proc/kcore.c
48405@@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
66a7e928
MT
48406 off_t offset = 0;
48407 struct kcore_list *m;
48408
48409+ pax_track_stack();
48410+
48411 /* setup ELF header */
48412 elf = (struct elfhdr *) bufp;
48413 bufp += sizeof(struct elfhdr);
fe2de317 48414@@ -478,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
57199397
MT
48415 * the addresses in the elf_phdr on our list.
48416 */
48417 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48418- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48419+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48420+ if (tsz > buflen)
48421 tsz = buflen;
48422-
58c5fc13 48423+
57199397
MT
48424 while (buflen) {
48425 struct kcore_list *m;
58c5fc13 48426
fe2de317 48427@@ -509,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
57199397 48428 kfree(elf_buf);
58c5fc13 48429 } else {
57199397
MT
48430 if (kern_addr_valid(start)) {
48431- unsigned long n;
48432+ char *elf_buf;
bc901d79 48433+ mm_segment_t oldfs;
57199397
MT
48434
48435- n = copy_to_user(buffer, (char *)start, tsz);
48436- /*
48437- * We cannot distingush between fault on source
48438- * and fault on destination. When this happens
48439- * we clear too and hope it will trigger the
48440- * EFAULT again.
48441- */
48442- if (n) {
48443- if (clear_user(buffer + tsz - n,
48444- n))
48445+ elf_buf = kmalloc(tsz, GFP_KERNEL);
48446+ if (!elf_buf)
48447+ return -ENOMEM;
bc901d79
MT
48448+ oldfs = get_fs();
48449+ set_fs(KERNEL_DS);
57199397 48450+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
bc901d79 48451+ set_fs(oldfs);
57199397
MT
48452+ if (copy_to_user(buffer, elf_buf, tsz)) {
48453+ kfree(elf_buf);
48454 return -EFAULT;
48455+ }
48456 }
bc901d79 48457+ set_fs(oldfs);
57199397
MT
48458+ kfree(elf_buf);
48459 } else {
48460 if (clear_user(buffer, tsz))
48461 return -EFAULT;
fe2de317 48462@@ -542,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
58c5fc13 48463
ae4e228f 48464 static int open_kcore(struct inode *inode, struct file *filp)
58c5fc13 48465 {
ae4e228f
MT
48466+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48467+ return -EPERM;
58c5fc13 48468+#endif
ae4e228f
MT
48469 if (!capable(CAP_SYS_RAWIO))
48470 return -EPERM;
48471 if (kcore_need_update)
fe2de317
MT
48472diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48473index 5861741..32c53bc 100644
48474--- a/fs/proc/meminfo.c
48475+++ b/fs/proc/meminfo.c
48476@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66a7e928
MT
48477 unsigned long pages[NR_LRU_LISTS];
48478 int lru;
48479
48480+ pax_track_stack();
48481+
48482 /*
48483 * display in kilobytes.
48484 */
fe2de317 48485@@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
ae4e228f
MT
48486 vmi.used >> 10,
48487 vmi.largest_chunk >> 10
48488 #ifdef CONFIG_MEMORY_FAILURE
48489- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48490+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48491 #endif
16454cff
MT
48492 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48493 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
fe2de317
MT
48494diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48495index b1822dd..df622cb 100644
48496--- a/fs/proc/nommu.c
48497+++ b/fs/proc/nommu.c
48498@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
58c5fc13
MT
48499 if (len < 1)
48500 len = 1;
48501 seq_printf(m, "%*c", len, ' ');
48502- seq_path(m, &file->f_path, "");
48503+ seq_path(m, &file->f_path, "\n\\");
48504 }
48505
48506 seq_putc(m, '\n');
fe2de317
MT
48507diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48508index f738024..876984a 100644
48509--- a/fs/proc/proc_net.c
48510+++ b/fs/proc/proc_net.c
48511@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
58c5fc13
MT
48512 struct task_struct *task;
48513 struct nsproxy *ns;
48514 struct net *net = NULL;
48515+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48516+ const struct cred *cred = current_cred();
48517+#endif
48518+
48519+#ifdef CONFIG_GRKERNSEC_PROC_USER
48520+ if (cred->fsuid)
48521+ return net;
48522+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48523+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48524+ return net;
48525+#endif
48526
48527 rcu_read_lock();
48528 task = pid_task(proc_pid(dir), PIDTYPE_PID);
fe2de317
MT
48529diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48530index 1a77dbe..56ec911 100644
48531--- a/fs/proc/proc_sysctl.c
48532+++ b/fs/proc/proc_sysctl.c
6e9df6a3 48533@@ -8,11 +8,13 @@
16454cff 48534 #include <linux/namei.h>
58c5fc13
MT
48535 #include "internal.h"
48536
48537+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
48538+
48539 static const struct dentry_operations proc_sys_dentry_operations;
48540 static const struct file_operations proc_sys_file_operations;
6e9df6a3
MT
48541-static const struct inode_operations proc_sys_inode_operations;
48542+const struct inode_operations proc_sys_inode_operations;
48543 static const struct file_operations proc_sys_dir_file_operations;
48544-static const struct inode_operations proc_sys_dir_operations;
48545+const struct inode_operations proc_sys_dir_operations;
48546
48547 static struct inode *proc_sys_make_inode(struct super_block *sb,
48548 struct ctl_table_header *head, struct ctl_table *table)
fe2de317 48549@@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
6e9df6a3
MT
48550
48551 err = NULL;
48552 d_set_d_op(dentry, &proc_sys_dentry_operations);
48553+
48554+ gr_handle_proc_create(dentry, inode);
48555+
48556 d_add(dentry, inode);
58c5fc13
MT
48557
48558+ if (gr_handle_sysctl(p, MAY_EXEC))
6e9df6a3
MT
48559+ err = ERR_PTR(-ENOENT);
48560+
48561 out:
48562 sysctl_head_finish(head);
48563 return err;
fe2de317 48564@@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
6e9df6a3
MT
48565 return -ENOMEM;
48566 } else {
48567 d_set_d_op(child, &proc_sys_dentry_operations);
48568+
48569+ gr_handle_proc_create(child, inode);
58c5fc13 48570+
6e9df6a3
MT
48571 d_add(child, inode);
48572 }
48573 } else {
fe2de317 48574@@ -230,6 +241,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
58c5fc13
MT
48575 if (*pos < file->f_pos)
48576 continue;
48577
48578+ if (gr_handle_sysctl(table, 0))
48579+ continue;
48580+
48581 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
48582 if (res)
48583 return res;
fe2de317 48584@@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
58c5fc13
MT
48585 if (IS_ERR(head))
48586 return PTR_ERR(head);
48587
48588+ if (table && gr_handle_sysctl(table, MAY_EXEC))
48589+ return -ENOENT;
48590+
48591 generic_fillattr(inode, stat);
48592 if (table)
48593 stat->mode = (stat->mode & S_IFMT) | table->mode;
fe2de317 48594@@ -370,17 +387,18 @@ static const struct file_operations proc_sys_file_operations = {
883a9837
MT
48595 };
48596
48597 static const struct file_operations proc_sys_dir_file_operations = {
48598+ .read = generic_read_dir,
48599 .readdir = proc_sys_readdir,
6e9df6a3
MT
48600 .llseek = generic_file_llseek,
48601 };
48602
48603-static const struct inode_operations proc_sys_inode_operations = {
48604+const struct inode_operations proc_sys_inode_operations = {
48605 .permission = proc_sys_permission,
48606 .setattr = proc_sys_setattr,
48607 .getattr = proc_sys_getattr,
48608 };
48609
48610-static const struct inode_operations proc_sys_dir_operations = {
48611+const struct inode_operations proc_sys_dir_operations = {
48612 .lookup = proc_sys_lookup,
48613 .permission = proc_sys_permission,
48614 .setattr = proc_sys_setattr,
fe2de317
MT
48615diff --git a/fs/proc/root.c b/fs/proc/root.c
48616index 9a8a2b7..3018df6 100644
48617--- a/fs/proc/root.c
48618+++ b/fs/proc/root.c
15a11c5b 48619@@ -123,7 +123,15 @@ void __init proc_root_init(void)
58c5fc13
MT
48620 #ifdef CONFIG_PROC_DEVICETREE
48621 proc_device_tree_init();
48622 #endif
48623+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48624+#ifdef CONFIG_GRKERNSEC_PROC_USER
48625+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48626+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48627+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48628+#endif
48629+#else
48630 proc_mkdir("bus", NULL);
48631+#endif
48632 proc_sys_init();
48633 }
48634
fe2de317
MT
48635diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48636index c7d4ee6..41c5564 100644
48637--- a/fs/proc/task_mmu.c
48638+++ b/fs/proc/task_mmu.c
48639@@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58c5fc13
MT
48640 "VmExe:\t%8lu kB\n"
48641 "VmLib:\t%8lu kB\n"
df50ba0c
MT
48642 "VmPTE:\t%8lu kB\n"
48643- "VmSwap:\t%8lu kB\n",
58c5fc13 48644- hiwater_vm << (PAGE_SHIFT-10),
df50ba0c 48645+ "VmSwap:\t%8lu kB\n"
58c5fc13
MT
48646+
48647+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48648+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48649+#endif
48650+
48651+ ,hiwater_vm << (PAGE_SHIFT-10),
48652 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48653 mm->locked_vm << (PAGE_SHIFT-10),
48654 hiwater_rss << (PAGE_SHIFT-10),
fe2de317 48655@@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58c5fc13
MT
48656 data << (PAGE_SHIFT-10),
48657 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
df50ba0c
MT
48658 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48659- swap << (PAGE_SHIFT-10));
48660+ swap << (PAGE_SHIFT-10)
58c5fc13
MT
48661+
48662+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48663+ , mm->context.user_cs_base, mm->context.user_cs_limit
48664+#endif
48665+
48666+ );
48667 }
48668
48669 unsigned long task_vsize(struct mm_struct *mm)
fe2de317 48670@@ -207,6 +218,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
58c5fc13
MT
48671 return ret;
48672 }
48673
48674+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48675+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48676+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48677+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48678+#endif
48679+
48680 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48681 {
48682 struct mm_struct *mm = vma->vm_mm;
fe2de317 48683@@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
57199397 48684 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
58c5fc13
MT
48685 }
48686
57199397 48687- /* We don't show the stack guard page in /proc/maps */
58c5fc13 48688+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66a7e928
MT
48689+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48690+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
58c5fc13 48691+#else
66a7e928
MT
48692 start = vma->vm_start;
48693- if (stack_guard_page_start(vma, start))
48694- start += PAGE_SIZE;
48695 end = vma->vm_end;
48696- if (stack_guard_page_end(vma, end))
48697- end -= PAGE_SIZE;
58c5fc13 48698+#endif
66a7e928
MT
48699
48700 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48701 start,
fe2de317 48702@@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
58c5fc13
MT
48703 flags & VM_WRITE ? 'w' : '-',
48704 flags & VM_EXEC ? 'x' : '-',
48705 flags & VM_MAYSHARE ? 's' : 'p',
48706+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48707+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48708+#else
48709 pgoff,
48710+#endif
48711 MAJOR(dev), MINOR(dev), ino, &len);
48712
48713 /*
fe2de317 48714@@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
58c5fc13
MT
48715 */
48716 if (file) {
48717 pad_len_spaces(m, len);
48718- seq_path(m, &file->f_path, "\n");
48719+ seq_path(m, &file->f_path, "\n\\");
48720 } else {
48721 const char *name = arch_vma_name(vma);
48722 if (!name) {
fe2de317 48723@@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
66a7e928
MT
48724 if (vma->vm_start <= mm->brk &&
48725 vma->vm_end >= mm->start_brk) {
58c5fc13
MT
48726 name = "[heap]";
48727- } else if (vma->vm_start <= mm->start_stack &&
48728- vma->vm_end >= mm->start_stack) {
48729+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48730+ (vma->vm_start <= mm->start_stack &&
48731+ vma->vm_end >= mm->start_stack)) {
48732 name = "[stack]";
df50ba0c
MT
48733 }
48734 } else {
fe2de317 48735@@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m, void *v)
58c5fc13
MT
48736 };
48737
48738 memset(&mss, 0, sizeof mss);
48739- mss.vma = vma;
df50ba0c 48740- /* mmap_sem is held in m_start */
58c5fc13
MT
48741- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48742- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
df50ba0c 48743-
58c5fc13
MT
48744+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48745+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48746+#endif
48747+ mss.vma = vma;
df50ba0c 48748+ /* mmap_sem is held in m_start */
58c5fc13
MT
48749+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48750+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48751+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48752+ }
48753+#endif
58c5fc13
MT
48754 show_map_vma(m, vma);
48755
df50ba0c 48756 seq_printf(m,
fe2de317 48757@@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m, void *v)
58c5fc13 48758 "KernelPageSize: %8lu kB\n"
16454cff
MT
48759 "MMUPageSize: %8lu kB\n"
48760 "Locked: %8lu kB\n",
58c5fc13
MT
48761+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48762+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48763+#else
48764 (vma->vm_end - vma->vm_start) >> 10,
48765+#endif
48766 mss.resident >> 10,
48767 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48768 mss.shared_clean >> 10,
fe2de317 48769@@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file *m, void *v)
15a11c5b
MT
48770
48771 if (file) {
48772 seq_printf(m, " file=");
48773- seq_path(m, &file->f_path, "\n\t= ");
48774+ seq_path(m, &file->f_path, "\n\t\\= ");
48775 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48776 seq_printf(m, " heap");
48777 } else if (vma->vm_start <= mm->start_stack &&
fe2de317
MT
48778diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48779index 980de54..2a4db5f 100644
48780--- a/fs/proc/task_nommu.c
48781+++ b/fs/proc/task_nommu.c
48782@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58c5fc13
MT
48783 else
48784 bytes += kobjsize(mm);
48785
48786- if (current->fs && current->fs->users > 1)
48787+ if (current->fs && atomic_read(&current->fs->users) > 1)
48788 sbytes += kobjsize(current->fs);
48789 else
48790 bytes += kobjsize(current->fs);
fe2de317 48791@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
57199397
MT
48792
48793 if (file) {
48794 pad_len_spaces(m, len);
58c5fc13
MT
48795- seq_path(m, &file->f_path, "");
48796+ seq_path(m, &file->f_path, "\n\\");
57199397
MT
48797 } else if (mm) {
48798 if (vma->vm_start <= mm->start_stack &&
48799 vma->vm_end >= mm->start_stack) {
fe2de317
MT
48800diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48801index d67908b..d13f6a6 100644
48802--- a/fs/quota/netlink.c
48803+++ b/fs/quota/netlink.c
48804@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
8308f9c9
MT
48805 void quota_send_warning(short type, unsigned int id, dev_t dev,
48806 const char warntype)
48807 {
48808- static atomic_t seq;
48809+ static atomic_unchecked_t seq;
48810 struct sk_buff *skb;
48811 void *msg_head;
48812 int ret;
fe2de317 48813@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
8308f9c9
MT
48814 "VFS: Not enough memory to send quota warning.\n");
48815 return;
48816 }
48817- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48818+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48819 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48820 if (!msg_head) {
48821 printk(KERN_ERR
fe2de317
MT
48822diff --git a/fs/readdir.c b/fs/readdir.c
48823index 356f715..c918d38 100644
48824--- a/fs/readdir.c
48825+++ b/fs/readdir.c
6892158b 48826@@ -17,6 +17,7 @@
58c5fc13
MT
48827 #include <linux/security.h>
48828 #include <linux/syscalls.h>
48829 #include <linux/unistd.h>
48830+#include <linux/namei.h>
48831
48832 #include <asm/uaccess.h>
48833
48834@@ -67,6 +68,7 @@ struct old_linux_dirent {
48835
48836 struct readdir_callback {
48837 struct old_linux_dirent __user * dirent;
48838+ struct file * file;
48839 int result;
48840 };
48841
fe2de317 48842@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
58c5fc13
MT
48843 buf->result = -EOVERFLOW;
48844 return -EOVERFLOW;
48845 }
48846+
48847+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48848+ return 0;
48849+
48850 buf->result++;
48851 dirent = buf->dirent;
48852 if (!access_ok(VERIFY_WRITE, dirent,
fe2de317 48853@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
58c5fc13
MT
48854
48855 buf.result = 0;
48856 buf.dirent = dirent;
48857+ buf.file = file;
48858
48859 error = vfs_readdir(file, fillonedir, &buf);
48860 if (buf.result)
48861@@ -142,6 +149,7 @@ struct linux_dirent {
48862 struct getdents_callback {
48863 struct linux_dirent __user * current_dir;
48864 struct linux_dirent __user * previous;
48865+ struct file * file;
48866 int count;
48867 int error;
48868 };
fe2de317 48869@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
58c5fc13
MT
48870 buf->error = -EOVERFLOW;
48871 return -EOVERFLOW;
48872 }
48873+
48874+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48875+ return 0;
48876+
48877 dirent = buf->previous;
48878 if (dirent) {
48879 if (__put_user(offset, &dirent->d_off))
fe2de317 48880@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
58c5fc13
MT
48881 buf.previous = NULL;
48882 buf.count = count;
48883 buf.error = 0;
48884+ buf.file = file;
48885
48886 error = vfs_readdir(file, filldir, &buf);
48887 if (error >= 0)
6892158b 48888@@ -229,6 +242,7 @@ out:
58c5fc13
MT
48889 struct getdents_callback64 {
48890 struct linux_dirent64 __user * current_dir;
48891 struct linux_dirent64 __user * previous;
48892+ struct file *file;
48893 int count;
48894 int error;
48895 };
fe2de317 48896@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
58c5fc13
MT
48897 buf->error = -EINVAL; /* only used if we fail.. */
48898 if (reclen > buf->count)
48899 return -EINVAL;
48900+
48901+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48902+ return 0;
48903+
48904 dirent = buf->previous;
48905 if (dirent) {
48906 if (__put_user(offset, &dirent->d_off))
fe2de317 48907@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
58c5fc13
MT
48908
48909 buf.current_dir = dirent;
48910 buf.previous = NULL;
48911+ buf.file = file;
48912 buf.count = count;
48913 buf.error = 0;
48914
fe2de317 48915@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
6e9df6a3
MT
48916 error = buf.error;
48917 lastdirent = buf.previous;
48918 if (lastdirent) {
48919- typeof(lastdirent->d_off) d_off = file->f_pos;
48920+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48921 if (__put_user(d_off, &lastdirent->d_off))
48922 error = -EFAULT;
48923 else
fe2de317
MT
48924diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
48925index 133e935..349ef18 100644
48926--- a/fs/reiserfs/dir.c
48927+++ b/fs/reiserfs/dir.c
48928@@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
66a7e928
MT
48929 struct reiserfs_dir_entry de;
48930 int ret = 0;
48931
48932+ pax_track_stack();
48933+
48934 reiserfs_write_lock(inode->i_sb);
48935
48936 reiserfs_check_lock_depth(inode->i_sb, "readdir");
fe2de317
MT
48937diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48938index 60c0804..d814f98 100644
48939--- a/fs/reiserfs/do_balan.c
48940+++ b/fs/reiserfs/do_balan.c
48941@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
58c5fc13
MT
48942 return;
48943 }
48944
48945- atomic_inc(&(fs_generation(tb->tb_sb)));
48946+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48947 do_balance_starts(tb);
48948
48949 /* balance leaf returns 0 except if combining L R and S into
fe2de317
MT
48950diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
48951index a159ba5..0396a76 100644
48952--- a/fs/reiserfs/journal.c
48953+++ b/fs/reiserfs/journal.c
48954@@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
66a7e928
MT
48955 struct buffer_head *bh;
48956 int i, j;
48957
48958+ pax_track_stack();
48959+
48960 bh = __getblk(dev, block, bufsize);
48961 if (buffer_uptodate(bh))
48962 return (bh);
fe2de317
MT
48963diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
48964index ef39232..0fa91ba 100644
48965--- a/fs/reiserfs/namei.c
48966+++ b/fs/reiserfs/namei.c
48967@@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
66a7e928
MT
48968 unsigned long savelink = 1;
48969 struct timespec ctime;
48970
48971+ pax_track_stack();
48972+
48973 /* three balancings: (1) old name removal, (2) new name insertion
48974 and (3) maybe "save" link insertion
48975 stat data updates: (1) old directory,
fe2de317
MT
48976diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48977index 7a99811..2c9286f 100644
48978--- a/fs/reiserfs/procfs.c
48979+++ b/fs/reiserfs/procfs.c
48980@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
58c5fc13
MT
48981 "SMALL_TAILS " : "NO_TAILS ",
48982 replay_only(sb) ? "REPLAY_ONLY " : "",
48983 convert_reiserfs(sb) ? "CONV " : "",
48984- atomic_read(&r->s_generation_counter),
48985+ atomic_read_unchecked(&r->s_generation_counter),
48986 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48987 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48988 SF(s_good_search_by_key_reada), SF(s_bmaps),
fe2de317 48989@@ -299,6 +299,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
66a7e928
MT
48990 struct journal_params *jp = &rs->s_v1.s_journal;
48991 char b[BDEVNAME_SIZE];
48992
48993+ pax_track_stack();
48994+
48995 seq_printf(m, /* on-disk fields */
48996 "jp_journal_1st_block: \t%i\n"
48997 "jp_journal_dev: \t%s[%x]\n"
fe2de317
MT
48998diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
48999index 313d39d..3a5811b 100644
49000--- a/fs/reiserfs/stree.c
49001+++ b/fs/reiserfs/stree.c
49002@@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
66a7e928
MT
49003 int iter = 0;
49004 #endif
49005
49006+ pax_track_stack();
49007+
49008 BUG_ON(!th->t_trans_id);
49009
49010 init_tb_struct(th, &s_del_balance, sb, path,
fe2de317 49011@@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
66a7e928
MT
49012 int retval;
49013 int quota_cut_bytes = 0;
49014
49015+ pax_track_stack();
49016+
49017 BUG_ON(!th->t_trans_id);
49018
49019 le_key2cpu_key(&cpu_key, key);
fe2de317 49020@@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
66a7e928
MT
49021 int quota_cut_bytes;
49022 loff_t tail_pos = 0;
49023
49024+ pax_track_stack();
49025+
49026 BUG_ON(!th->t_trans_id);
49027
49028 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
fe2de317 49029@@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
66a7e928
MT
49030 int retval;
49031 int fs_gen;
49032
49033+ pax_track_stack();
49034+
49035 BUG_ON(!th->t_trans_id);
49036
49037 fs_gen = get_generation(inode->i_sb);
fe2de317 49038@@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
66a7e928
MT
49039 int fs_gen = 0;
49040 int quota_bytes = 0;
49041
49042+ pax_track_stack();
49043+
49044 BUG_ON(!th->t_trans_id);
49045
49046 if (inode) { /* Do we count quotas for item? */
fe2de317
MT
49047diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
49048index 14363b9..dd95a04 100644
49049--- a/fs/reiserfs/super.c
49050+++ b/fs/reiserfs/super.c
49051@@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
66a7e928
MT
49052 {.option_name = NULL}
49053 };
49054
49055+ pax_track_stack();
49056+
49057 *blocks = 0;
49058 if (!options || !*options)
49059 /* use default configuration: create tails, journaling on, no
fe2de317
MT
49060diff --git a/fs/select.c b/fs/select.c
49061index d33418f..f8e06bc 100644
49062--- a/fs/select.c
49063+++ b/fs/select.c
ae4e228f 49064@@ -20,6 +20,7 @@
58c5fc13
MT
49065 #include <linux/module.h>
49066 #include <linux/slab.h>
49067 #include <linux/poll.h>
49068+#include <linux/security.h>
49069 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49070 #include <linux/file.h>
49071 #include <linux/fdtable.h>
fe2de317 49072@@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
66a7e928
MT
49073 int retval, i, timed_out = 0;
49074 unsigned long slack = 0;
49075
49076+ pax_track_stack();
49077+
49078 rcu_read_lock();
49079 retval = max_select_fd(n, fds);
49080 rcu_read_unlock();
fe2de317 49081@@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
66a7e928
MT
49082 /* Allocate small arguments on the stack to save memory and be faster */
49083 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
49084
49085+ pax_track_stack();
49086+
49087 ret = -EINVAL;
49088 if (n < 0)
49089 goto out_nofds;
fe2de317 49090@@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
58c5fc13
MT
49091 struct poll_list *walk = head;
49092 unsigned long todo = nfds;
49093
66a7e928
MT
49094+ pax_track_stack();
49095+
58c5fc13 49096+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
df50ba0c 49097 if (nfds > rlimit(RLIMIT_NOFILE))
58c5fc13
MT
49098 return -EINVAL;
49099
fe2de317
MT
49100diff --git a/fs/seq_file.c b/fs/seq_file.c
49101index 05d6b0e..ee96362 100644
49102--- a/fs/seq_file.c
49103+++ b/fs/seq_file.c
49104@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
58c5fc13
MT
49105 return 0;
49106 }
49107 if (!m->buf) {
49108- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49109+ m->size = PAGE_SIZE;
ae4e228f 49110+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
58c5fc13
MT
49111 if (!m->buf)
49112 return -ENOMEM;
49113 }
fe2de317 49114@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
58c5fc13
MT
49115 Eoverflow:
49116 m->op->stop(m, p);
49117 kfree(m->buf);
49118- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49119+ m->size <<= 1;
49120+ m->buf = kmalloc(m->size, GFP_KERNEL);
49121 return !m->buf ? -ENOMEM : -EAGAIN;
49122 }
49123
fe2de317 49124@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
58c5fc13
MT
49125 m->version = file->f_version;
49126 /* grab buffer if we didn't have one */
49127 if (!m->buf) {
49128- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49129+ m->size = PAGE_SIZE;
ae4e228f 49130+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
58c5fc13
MT
49131 if (!m->buf)
49132 goto Enomem;
49133 }
fe2de317 49134@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
58c5fc13
MT
49135 goto Fill;
49136 m->op->stop(m, p);
49137 kfree(m->buf);
49138- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49139+ m->size <<= 1;
49140+ m->buf = kmalloc(m->size, GFP_KERNEL);
49141 if (!m->buf)
49142 goto Enomem;
49143 m->count = 0;
fe2de317 49144@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
15a11c5b
MT
49145 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49146 void *data)
49147 {
49148- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49149+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49150 int res = -ENOMEM;
49151
49152 if (op) {
fe2de317
MT
49153diff --git a/fs/splice.c b/fs/splice.c
49154index fa2defa..9a697a5 100644
49155--- a/fs/splice.c
49156+++ b/fs/splice.c
49157@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
ae4e228f
MT
49158 pipe_lock(pipe);
49159
49160 for (;;) {
49161- if (!pipe->readers) {
49162+ if (!atomic_read(&pipe->readers)) {
49163 send_sig(SIGPIPE, current, 0);
49164 if (!ret)
49165 ret = -EPIPE;
fe2de317 49166@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
ae4e228f
MT
49167 do_wakeup = 0;
49168 }
49169
49170- pipe->waiting_writers++;
49171+ atomic_inc(&pipe->waiting_writers);
49172 pipe_wait(pipe);
49173- pipe->waiting_writers--;
49174+ atomic_dec(&pipe->waiting_writers);
49175 }
49176
49177 pipe_unlock(pipe);
fe2de317 49178@@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
66a7e928
MT
49179 .spd_release = spd_release_page,
49180 };
49181
49182+ pax_track_stack();
49183+
49184 if (splice_grow_spd(pipe, &spd))
49185 return -ENOMEM;
49186
fe2de317 49187@@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
ae4e228f
MT
49188 old_fs = get_fs();
49189 set_fs(get_ds());
49190 /* The cast to a user pointer is valid due to the set_fs() */
49191- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
6e9df6a3 49192+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
ae4e228f
MT
49193 set_fs(old_fs);
49194
49195 return res;
fe2de317 49196@@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
ae4e228f
MT
49197 old_fs = get_fs();
49198 set_fs(get_ds());
49199 /* The cast to a user pointer is valid due to the set_fs() */
49200- res = vfs_write(file, (const char __user *)buf, count, &pos);
6e9df6a3 49201+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
ae4e228f
MT
49202 set_fs(old_fs);
49203
49204 return res;
fe2de317 49205@@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
66a7e928
MT
49206 .spd_release = spd_release_page,
49207 };
49208
49209+ pax_track_stack();
49210+
49211 if (splice_grow_spd(pipe, &spd))
49212 return -ENOMEM;
49213
fe2de317 49214@@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
ae4e228f
MT
49215 goto err;
49216
49217 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49218- vec[i].iov_base = (void __user *) page_address(page);
6e9df6a3 49219+ vec[i].iov_base = (void __force_user *) page_address(page);
ae4e228f 49220 vec[i].iov_len = this_len;
57199397 49221 spd.pages[i] = page;
ae4e228f 49222 spd.nr_pages++;
15a11c5b 49223@@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
ae4e228f
MT
49224 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49225 {
49226 while (!pipe->nrbufs) {
49227- if (!pipe->writers)
49228+ if (!atomic_read(&pipe->writers))
49229 return 0;
49230
49231- if (!pipe->waiting_writers && sd->num_spliced)
49232+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49233 return 0;
49234
49235 if (sd->flags & SPLICE_F_NONBLOCK)
fe2de317 49236@@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
ae4e228f
MT
49237 * out of the pipe right after the splice_to_pipe(). So set
49238 * PIPE_READERS appropriately.
49239 */
49240- pipe->readers = 1;
49241+ atomic_set(&pipe->readers, 1);
49242
49243 current->splice_pipe = pipe;
49244 }
fe2de317 49245@@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
66a7e928
MT
49246 };
49247 long ret;
49248
49249+ pax_track_stack();
49250+
49251 pipe = get_pipe_info(file);
49252 if (!pipe)
49253 return -EBADF;
fe2de317 49254@@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ae4e228f
MT
49255 ret = -ERESTARTSYS;
49256 break;
49257 }
49258- if (!pipe->writers)
49259+ if (!atomic_read(&pipe->writers))
49260 break;
49261- if (!pipe->waiting_writers) {
49262+ if (!atomic_read(&pipe->waiting_writers)) {
49263 if (flags & SPLICE_F_NONBLOCK) {
49264 ret = -EAGAIN;
49265 break;
fe2de317 49266@@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ae4e228f
MT
49267 pipe_lock(pipe);
49268
57199397 49269 while (pipe->nrbufs >= pipe->buffers) {
ae4e228f
MT
49270- if (!pipe->readers) {
49271+ if (!atomic_read(&pipe->readers)) {
49272 send_sig(SIGPIPE, current, 0);
49273 ret = -EPIPE;
49274 break;
fe2de317 49275@@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ae4e228f
MT
49276 ret = -ERESTARTSYS;
49277 break;
49278 }
49279- pipe->waiting_writers++;
49280+ atomic_inc(&pipe->waiting_writers);
49281 pipe_wait(pipe);
49282- pipe->waiting_writers--;
49283+ atomic_dec(&pipe->waiting_writers);
49284 }
58c5fc13 49285
ae4e228f 49286 pipe_unlock(pipe);
15a11c5b 49287@@ -1819,14 +1825,14 @@ retry:
ae4e228f
MT
49288 pipe_double_lock(ipipe, opipe);
49289
49290 do {
49291- if (!opipe->readers) {
49292+ if (!atomic_read(&opipe->readers)) {
49293 send_sig(SIGPIPE, current, 0);
49294 if (!ret)
49295 ret = -EPIPE;
49296 break;
49297 }
49298
49299- if (!ipipe->nrbufs && !ipipe->writers)
49300+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49301 break;
49302
49303 /*
fe2de317 49304@@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
ae4e228f
MT
49305 pipe_double_lock(ipipe, opipe);
49306
49307 do {
49308- if (!opipe->readers) {
49309+ if (!atomic_read(&opipe->readers)) {
49310 send_sig(SIGPIPE, current, 0);
49311 if (!ret)
49312 ret = -EPIPE;
fe2de317 49313@@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
ae4e228f
MT
49314 * return EAGAIN if we have the potential of some data in the
49315 * future, otherwise just return 0
49316 */
49317- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49318+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49319 ret = -EAGAIN;
49320
49321 pipe_unlock(ipipe);
fe2de317
MT
49322diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49323index 1ad8c93..6633545 100644
49324--- a/fs/sysfs/file.c
49325+++ b/fs/sysfs/file.c
49326@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
8308f9c9
MT
49327
49328 struct sysfs_open_dirent {
49329 atomic_t refcnt;
49330- atomic_t event;
49331+ atomic_unchecked_t event;
49332 wait_queue_head_t poll;
49333 struct list_head buffers; /* goes through sysfs_buffer.list */
49334 };
fe2de317 49335@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
8308f9c9
MT
49336 if (!sysfs_get_active(attr_sd))
49337 return -ENODEV;
49338
49339- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49340+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49341 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49342
49343 sysfs_put_active(attr_sd);
fe2de317 49344@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
8308f9c9
MT
49345 return -ENOMEM;
49346
49347 atomic_set(&new_od->refcnt, 0);
49348- atomic_set(&new_od->event, 1);
49349+ atomic_set_unchecked(&new_od->event, 1);
49350 init_waitqueue_head(&new_od->poll);
49351 INIT_LIST_HEAD(&new_od->buffers);
49352 goto retry;
fe2de317 49353@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
8308f9c9
MT
49354
49355 sysfs_put_active(attr_sd);
49356
49357- if (buffer->event != atomic_read(&od->event))
49358+ if (buffer->event != atomic_read_unchecked(&od->event))
49359 goto trigger;
49360
49361 return DEFAULT_POLLMASK;
fe2de317 49362@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
8308f9c9
MT
49363
49364 od = sd->s_attr.open;
49365 if (od) {
49366- atomic_inc(&od->event);
49367+ atomic_inc_unchecked(&od->event);
49368 wake_up_interruptible(&od->poll);
49369 }
49370
fe2de317
MT
49371diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
49372index e34f0d9..740ea7b 100644
49373--- a/fs/sysfs/mount.c
49374+++ b/fs/sysfs/mount.c
16454cff
MT
49375@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
49376 .s_name = "",
49377 .s_count = ATOMIC_INIT(1),
49378 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
49379+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49380+ .s_mode = S_IFDIR | S_IRWXU,
49381+#else
49382 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
49383+#endif
49384 .s_ino = 1,
49385 };
49386
fe2de317
MT
49387diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49388index a7ac78f..02158e1 100644
49389--- a/fs/sysfs/symlink.c
49390+++ b/fs/sysfs/symlink.c
49391@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58c5fc13
MT
49392
49393 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49394 {
49395- char *page = nd_get_link(nd);
49396+ const char *page = nd_get_link(nd);
49397 if (!IS_ERR(page))
49398 free_page((unsigned long)page);
49399 }
fe2de317
MT
49400diff --git a/fs/udf/inode.c b/fs/udf/inode.c
49401index 1d1358e..408bedb 100644
49402--- a/fs/udf/inode.c
49403+++ b/fs/udf/inode.c
49404@@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
66a7e928
MT
49405 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
49406 int lastblock = 0;
49407
49408+ pax_track_stack();
49409+
49410 prev_epos.offset = udf_file_entry_alloc_offset(inode);
49411 prev_epos.block = iinfo->i_location;
49412 prev_epos.bh = NULL;
fe2de317
MT
49413diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49414index 9215700..bf1f68e 100644
49415--- a/fs/udf/misc.c
49416+++ b/fs/udf/misc.c
49417@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
bc901d79
MT
49418
49419 u8 udf_tag_checksum(const struct tag *t)
49420 {
49421- u8 *data = (u8 *)t;
49422+ const u8 *data = (const u8 *)t;
49423 u8 checksum = 0;
49424 int i;
49425 for (i = 0; i < sizeof(struct tag); ++i)
fe2de317
MT
49426diff --git a/fs/utimes.c b/fs/utimes.c
49427index ba653f3..06ea4b1 100644
49428--- a/fs/utimes.c
49429+++ b/fs/utimes.c
58c5fc13
MT
49430@@ -1,6 +1,7 @@
49431 #include <linux/compiler.h>
49432 #include <linux/file.h>
49433 #include <linux/fs.h>
49434+#include <linux/security.h>
49435 #include <linux/linkage.h>
49436 #include <linux/mount.h>
49437 #include <linux/namei.h>
fe2de317 49438@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
58c5fc13
MT
49439 goto mnt_drop_write_and_out;
49440 }
49441 }
49442+
49443+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49444+ error = -EACCES;
49445+ goto mnt_drop_write_and_out;
49446+ }
49447+
49448 mutex_lock(&inode->i_mutex);
49449 error = notify_change(path->dentry, &newattrs);
49450 mutex_unlock(&inode->i_mutex);
fe2de317
MT
49451diff --git a/fs/xattr.c b/fs/xattr.c
49452index f060663..def7007 100644
49453--- a/fs/xattr.c
49454+++ b/fs/xattr.c
15a11c5b 49455@@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
bc901d79
MT
49456 * Extended attribute SET operations
49457 */
49458 static long
49459-setxattr(struct dentry *d, const char __user *name, const void __user *value,
49460+setxattr(struct path *path, const char __user *name, const void __user *value,
49461 size_t size, int flags)
49462 {
49463 int error;
fe2de317 49464@@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
bc901d79
MT
49465 return PTR_ERR(kvalue);
49466 }
49467
49468- error = vfs_setxattr(d, kname, kvalue, size, flags);
49469+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49470+ error = -EACCES;
49471+ goto out;
49472+ }
49473+
49474+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49475+out:
49476 kfree(kvalue);
49477 return error;
49478 }
fe2de317 49479@@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
bc901d79
MT
49480 return error;
49481 error = mnt_want_write(path.mnt);
49482 if (!error) {
49483- error = setxattr(path.dentry, name, value, size, flags);
49484+ error = setxattr(&path, name, value, size, flags);
49485 mnt_drop_write(path.mnt);
49486 }
49487 path_put(&path);
fe2de317 49488@@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
bc901d79
MT
49489 return error;
49490 error = mnt_want_write(path.mnt);
49491 if (!error) {
49492- error = setxattr(path.dentry, name, value, size, flags);
49493+ error = setxattr(&path, name, value, size, flags);
49494 mnt_drop_write(path.mnt);
49495 }
49496 path_put(&path);
fe2de317 49497@@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
bc901d79
MT
49498 const void __user *,value, size_t, size, int, flags)
49499 {
49500 struct file *f;
49501- struct dentry *dentry;
49502 int error = -EBADF;
49503
49504 f = fget(fd);
49505 if (!f)
49506 return error;
49507- dentry = f->f_path.dentry;
49508- audit_inode(NULL, dentry);
49509+ audit_inode(NULL, f->f_path.dentry);
49510 error = mnt_want_write_file(f);
49511 if (!error) {
49512- error = setxattr(dentry, name, value, size, flags);
49513+ error = setxattr(&f->f_path, name, value, size, flags);
49514 mnt_drop_write(f->f_path.mnt);
49515 }
49516 fput(f);
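
With the change above, the static setxattr() helper takes the whole struct path instead of a bare dentry, so the added gr_acl_handle_setxattr() check can see both the dentry and the mount before vfs_setxattr() runs. The user-visible syscalls are untouched; a minimal sketch of that syscall surface follows, with a made-up file name and attribute:

#include <sys/xattr.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *path  = "/tmp/example-file";   /* hypothetical target file   */
    const char *value = "illustrative";

    /* Same entry point as SYSCALL_DEFINE5(setxattr, ...) patched above;
     * a grsecurity ACL denial would surface here as EACCES. */
    if (setxattr(path, "user.comment", value, strlen(value), 0) != 0)
        perror("setxattr");
    else
        puts("user.comment set");
    return 0;
}
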
fe2de317
MT
49517diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49518index 8d5a506..7f62712 100644
49519--- a/fs/xattr_acl.c
49520+++ b/fs/xattr_acl.c
49521@@ -17,8 +17,8 @@
49522 struct posix_acl *
49523 posix_acl_from_xattr(const void *value, size_t size)
49524 {
49525- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49526- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49527+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49528+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49529 int count;
49530 struct posix_acl *acl;
49531 struct posix_acl_entry *acl_e;
49532diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49533index 452a291..91a95f3b 100644
49534--- a/fs/xfs/xfs_bmap.c
49535+++ b/fs/xfs/xfs_bmap.c
6e9df6a3 49536@@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
58c5fc13
MT
49537 int nmap,
49538 int ret_nmap);
49539 #else
49540-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49541+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49542 #endif /* DEBUG */
49543
ae4e228f 49544 STATIC int
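
Replacing the empty non-DEBUG definition of xfs_bmap_validate_ret() with do {} while (0) keeps the no-op expansion a single complete statement, so every call site needs the same trailing semicolon whether or not DEBUG builds the real function. A tiny sketch of the difference, with hypothetical macro names:

#include <stdio.h>

#define validate_empty(x)                   /* expands to nothing at all        */
#define validate_safe(x)  do {} while (0)   /* expands to one complete statement */

int main(void)
{
    int ok = 1;

    /* "validate_empty(ok)" with no trailing ";" still compiles, but would
     * break the moment the macro becomes a real function call in DEBUG
     * builds; the do/while form makes the ";" mandatory in every build,
     * keeping call sites identical. */
    validate_safe(ok);

    if (ok)
        validate_safe(ok);   /* behaves like any other single statement */
    else
        puts("not ok");
    return 0;
}
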
fe2de317
MT
49545diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49546index 79d05e8..e3e5861 100644
49547--- a/fs/xfs/xfs_dir2_sf.c
49548+++ b/fs/xfs/xfs_dir2_sf.c
6e9df6a3 49549@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
71d190be
MT
49550 }
49551
6e9df6a3 49552 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
71d190be
MT
49553- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49554+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49555+ char name[sfep->namelen];
49556+ memcpy(name, sfep->name, sfep->namelen);
49557+ if (filldir(dirent, name, sfep->namelen,
49558+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
49559+ *offset = off & 0x7fffffff;
49560+ return 0;
49561+ }
49562+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49563 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49564 *offset = off & 0x7fffffff;
49565 return 0;
fe2de317
MT
49566diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49567index f7ce7de..e1a5db0 100644
49568--- a/fs/xfs/xfs_ioctl.c
49569+++ b/fs/xfs/xfs_ioctl.c
6e9df6a3
MT
49570@@ -128,7 +128,7 @@ xfs_find_handle(
49571 }
49572
49573 error = -EFAULT;
49574- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49575+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49576 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49577 goto out_put;
49578
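
The xfs_find_handle() hunk above refuses a caller-supplied hsize larger than the on-stack handle before copy_to_user() runs, so the copy can never read past the kernel buffer. A small userspace sketch of the same check-the-length-then-copy pattern; the struct and function names are invented for illustration:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct handle { char data[24]; };   /* stand-in for the on-stack xfs handle */

/* Reject any requested length larger than the source object, mirroring
 * "if (hsize > sizeof handle || copy_to_user(...))" in the hunk above. */
static int export_handle(void *dst, size_t requested, const struct handle *h)
{
    if (requested > sizeof(*h))
        return -EFAULT;             /* same error the patched code reports */
    memcpy(dst, h, requested);      /* copy_to_user() analogue             */
    return 0;
}

int main(void)
{
    struct handle h = { "example" };
    char out[64];

    printf("len 24 -> %d\n", export_handle(out, 24, &h));  /* accepted: 0    */
    printf("len 64 -> %d\n", export_handle(out, 64, &h));  /* rejected: -14  */
    return 0;
}
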
fe2de317
MT
49579diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49580index 474920b..97169a9 100644
49581--- a/fs/xfs/xfs_iops.c
49582+++ b/fs/xfs/xfs_iops.c
6e9df6a3
MT
49583@@ -446,7 +446,7 @@ xfs_vn_put_link(
49584 struct nameidata *nd,
49585 void *p)
49586 {
49587- char *s = nd_get_link(nd);
49588+ const char *s = nd_get_link(nd);
49589
49590 if (!IS_ERR(s))
49591 kfree(s);
fe2de317
MT
49592diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49593new file mode 100644
49594index 0000000..9629731
49595--- /dev/null
49596+++ b/grsecurity/Kconfig
49597@@ -0,0 +1,1037 @@
49598+#
49599+# grsecurity configuration
49600+#
883a9837 49601+
fe2de317 49602+menu "Grsecurity"
58c5fc13 49603+
fe2de317
MT
49604+config GRKERNSEC
49605+ bool "Grsecurity"
49606+ select CRYPTO
49607+ select CRYPTO_SHA256
49608+ help
49609+ If you say Y here, you will be able to configure many features
49610+ that will enhance the security of your system. It is highly
49611+ recommended that you say Y here and read through the help
49612+ for each option so that you fully understand the features and
49613+ can evaluate their usefulness for your machine.
58c5fc13 49614+
fe2de317
MT
49615+choice
49616+ prompt "Security Level"
49617+ depends on GRKERNSEC
49618+ default GRKERNSEC_CUSTOM
58c5fc13 49619+
fe2de317
MT
49620+config GRKERNSEC_LOW
49621+ bool "Low"
49622+ select GRKERNSEC_LINK
49623+ select GRKERNSEC_FIFO
49624+ select GRKERNSEC_RANDNET
49625+ select GRKERNSEC_DMESG
49626+ select GRKERNSEC_CHROOT
49627+ select GRKERNSEC_CHROOT_CHDIR
58c5fc13 49628+
fe2de317
MT
49629+ help
49630+ If you choose this option, several of the grsecurity options will
49631+ be enabled that will give you greater protection against a number
49632+ of attacks, while assuring that none of your software will have any
49633+ conflicts with the additional security measures. If you run a lot
49634+ of unusual software, or you are having problems with the higher
49635+ security levels, you should say Y here. With this option, the
49636+ following features are enabled:
58c5fc13 49637+
fe2de317
MT
49638+ - Linking restrictions
49639+ - FIFO restrictions
49640+ - Restricted dmesg
49641+ - Enforced chdir("/") on chroot
49642+ - Runtime module disabling
58c5fc13 49643+
fe2de317
MT
49644+config GRKERNSEC_MEDIUM
49645+ bool "Medium"
49646+ select PAX
49647+ select PAX_EI_PAX
49648+ select PAX_PT_PAX_FLAGS
49649+ select PAX_HAVE_ACL_FLAGS
49650+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49651+ select GRKERNSEC_CHROOT
49652+ select GRKERNSEC_CHROOT_SYSCTL
49653+ select GRKERNSEC_LINK
49654+ select GRKERNSEC_FIFO
49655+ select GRKERNSEC_DMESG
49656+ select GRKERNSEC_RANDNET
49657+ select GRKERNSEC_FORKFAIL
49658+ select GRKERNSEC_TIME
49659+ select GRKERNSEC_SIGNAL
49660+ select GRKERNSEC_CHROOT
49661+ select GRKERNSEC_CHROOT_UNIX
49662+ select GRKERNSEC_CHROOT_MOUNT
49663+ select GRKERNSEC_CHROOT_PIVOT
49664+ select GRKERNSEC_CHROOT_DOUBLE
49665+ select GRKERNSEC_CHROOT_CHDIR
49666+ select GRKERNSEC_CHROOT_MKNOD
49667+ select GRKERNSEC_PROC
49668+ select GRKERNSEC_PROC_USERGROUP
49669+ select PAX_RANDUSTACK
49670+ select PAX_ASLR
49671+ select PAX_RANDMMAP
49672+ select PAX_REFCOUNT if (X86 || SPARC64)
49673+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
58c5fc13 49674+
fe2de317
MT
49675+ help
49676+ If you say Y here, several features in addition to those included
49677+ in the low additional security level will be enabled. These
49678+ features provide even more security to your system, though in rare
49679+ cases they may be incompatible with very old or poorly written
49680+ software. If you enable this option, make sure that your auth
49681+ service (identd) is running as gid 1001. With this option,
49682+ the following features (in addition to those provided in the
49683+ low additional security level) will be enabled:
58c5fc13 49684+
fe2de317
MT
49685+ - Failed fork logging
49686+ - Time change logging
49687+ - Signal logging
49688+ - Deny mounts in chroot
49689+ - Deny double chrooting
49690+ - Deny sysctl writes in chroot
49691+ - Deny mknod in chroot
49692+ - Deny access to abstract AF_UNIX sockets out of chroot
49693+ - Deny pivot_root in chroot
49694+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49695+ - /proc restrictions with special GID set to 10 (usually wheel)
49696+ - Address Space Layout Randomization (ASLR)
49697+ - Prevent exploitation of most refcount overflows
49698+ - Bounds checking of copying between the kernel and userland
58c5fc13 49699+
fe2de317
MT
49700+config GRKERNSEC_HIGH
49701+ bool "High"
49702+ select GRKERNSEC_LINK
49703+ select GRKERNSEC_FIFO
49704+ select GRKERNSEC_DMESG
49705+ select GRKERNSEC_FORKFAIL
49706+ select GRKERNSEC_TIME
49707+ select GRKERNSEC_SIGNAL
49708+ select GRKERNSEC_CHROOT
49709+ select GRKERNSEC_CHROOT_SHMAT
49710+ select GRKERNSEC_CHROOT_UNIX
49711+ select GRKERNSEC_CHROOT_MOUNT
49712+ select GRKERNSEC_CHROOT_FCHDIR
49713+ select GRKERNSEC_CHROOT_PIVOT
49714+ select GRKERNSEC_CHROOT_DOUBLE
49715+ select GRKERNSEC_CHROOT_CHDIR
49716+ select GRKERNSEC_CHROOT_MKNOD
49717+ select GRKERNSEC_CHROOT_CAPS
49718+ select GRKERNSEC_CHROOT_SYSCTL
49719+ select GRKERNSEC_CHROOT_FINDTASK
49720+ select GRKERNSEC_SYSFS_RESTRICT
49721+ select GRKERNSEC_PROC
49722+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49723+ select GRKERNSEC_HIDESYM
49724+ select GRKERNSEC_BRUTE
49725+ select GRKERNSEC_PROC_USERGROUP
49726+ select GRKERNSEC_KMEM
49727+ select GRKERNSEC_RESLOG
49728+ select GRKERNSEC_RANDNET
49729+ select GRKERNSEC_PROC_ADD
49730+ select GRKERNSEC_CHROOT_CHMOD
49731+ select GRKERNSEC_CHROOT_NICE
49732+ select GRKERNSEC_AUDIT_MOUNT
49733+ select GRKERNSEC_MODHARDEN if (MODULES)
49734+ select GRKERNSEC_HARDEN_PTRACE
49735+ select GRKERNSEC_VM86 if (X86_32)
49736+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49737+ select PAX
49738+ select PAX_RANDUSTACK
49739+ select PAX_ASLR
49740+ select PAX_RANDMMAP
49741+ select PAX_NOEXEC
49742+ select PAX_MPROTECT
49743+ select PAX_EI_PAX
49744+ select PAX_PT_PAX_FLAGS
49745+ select PAX_HAVE_ACL_FLAGS
49746+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49747+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
49748+ select PAX_RANDKSTACK if (X86_TSC && X86)
49749+ select PAX_SEGMEXEC if (X86_32)
49750+ select PAX_PAGEEXEC
49751+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49752+ select PAX_EMUTRAMP if (PARISC)
49753+ select PAX_EMUSIGRT if (PARISC)
49754+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49755+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49756+ select PAX_REFCOUNT if (X86 || SPARC64)
49757+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49758+ help
49759+ If you say Y here, many of the features of grsecurity will be
49760+ enabled, which will protect you against many kinds of attacks
49761+ against your system. The heightened security comes at a cost
49762+ of an increased chance of incompatibilities with rare software
49763+ on your machine. Since this security level enables PaX, you should
49764+ view <http://pax.grsecurity.net> and read about the PaX
49765+ project. While you are there, download chpax and run it on
49766+ binaries that cause problems with PaX. Also remember that
49767+ since the /proc restrictions are enabled, you must run your
49768+ identd as gid 1001. This security level enables the following
49769+ features in addition to those listed in the low and medium
49770+ security levels:
58c5fc13 49771+
fe2de317
MT
49772+ - Additional /proc restrictions
49773+ - Chmod restrictions in chroot
49774+ - No signals, ptrace, or viewing of processes outside of chroot
49775+ - Capability restrictions in chroot
49776+ - Deny fchdir out of chroot
49777+ - Priority restrictions in chroot
49778+ - Segmentation-based implementation of PaX
49779+ - Mprotect restrictions
49780+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49781+ - Kernel stack randomization
49782+ - Mount/unmount/remount logging
49783+ - Kernel symbol hiding
49784+ - Hardening of module auto-loading
49785+ - Ptrace restrictions
49786+ - Restricted vm86 mode
49787+ - Restricted sysfs/debugfs
49788+ - Active kernel exploit response
58c5fc13 49789+
fe2de317
MT
49790+config GRKERNSEC_CUSTOM
49791+ bool "Custom"
49792+ help
49793+ If you say Y here, you will be able to configure every grsecurity
49794+ option, which allows you to enable many more features that aren't
49795+ covered in the basic security levels. These additional features
49796+ include TPE, socket restrictions, and the sysctl system for
49797+ grsecurity. It is advised that you read through the help for
49798+ each option to determine its usefulness in your situation.
58c5fc13 49799+
fe2de317 49800+endchoice
58c5fc13 49801+
fe2de317
MT
49802+menu "Address Space Protection"
49803+depends on GRKERNSEC
58c5fc13 49804+
fe2de317
MT
49805+config GRKERNSEC_KMEM
49806+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49807+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49808+ help
49809+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49810+ be written to or read from to modify or leak the contents of the running
49811+ kernel. /dev/port will also not be allowed to be opened. If you have module
49812+ support disabled, enabling this will close up four ways that are
49813+ currently used to insert malicious code into the running kernel.
49814+ Even with all these features enabled, we still highly recommend that
49815+ you use the RBAC system, as it is still possible for an attacker to
49816+ modify the running kernel through privileged I/O granted by ioperm/iopl.
49817+ If you are not using XFree86, you may be able to stop this additional
49818+ case by enabling the 'Disable privileged I/O' option. Though nothing
49819+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49820+ but only to video memory, which is the only writing we allow in this
49821+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49822+ not be allowed to mprotect it with PROT_WRITE later.
49823+ It is highly recommended that you say Y here if you meet all the
49824+ conditions above.
58c5fc13 49825+
fe2de317
MT
49826+config GRKERNSEC_VM86
49827+ bool "Restrict VM86 mode"
49828+ depends on X86_32
58c5fc13 49829+
fe2de317
MT
49830+ help
49831+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49832+ make use of a special execution mode on 32bit x86 processors called
49833+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49834+ video cards and will still work with this option enabled. The purpose
49835+ of the option is to prevent exploitation of emulation errors in
49836+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
49837+ Nearly all users should be able to enable this option.
58c5fc13 49838+
fe2de317
MT
49839+config GRKERNSEC_IO
49840+ bool "Disable privileged I/O"
49841+ depends on X86
49842+ select RTC_CLASS
49843+ select RTC_INTF_DEV
49844+ select RTC_DRV_CMOS
58c5fc13 49845+
fe2de317
MT
49846+ help
49847+ If you say Y here, all ioperm and iopl calls will return an error.
49848+ Ioperm and iopl can be used to modify the running kernel.
49849+ Unfortunately, some programs need this access to operate properly,
49850+ the most notable of which are XFree86 and hwclock. hwclock can be
49851+ remedied by having RTC support in the kernel, so real-time
49852+ clock support is enabled if this option is enabled, to ensure
49853+ that hwclock operates correctly. XFree86 still will not
49854+ operate correctly with this option enabled, so DO NOT CHOOSE Y
49855+ IF YOU USE XFree86. If you use XFree86 and you still want to
49856+ protect your kernel against modification, use the RBAC system.
58c5fc13 49857+
fe2de317
MT
49858+config GRKERNSEC_PROC_MEMMAP
49859+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
49860+ default y if (PAX_NOEXEC || PAX_ASLR)
49861+ depends on PAX_NOEXEC || PAX_ASLR
49862+ help
49863+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49864+ give no information about the addresses of a task's mappings if
49865+ PaX features that rely on random addresses are enabled on that task.
49866+ If you use PaX it is strongly recommended that you say Y here as it
49867+ closes up a hole that makes the full ASLR useless for suid
49868+ binaries.
58c5fc13 49869+
fe2de317
MT
49870+config GRKERNSEC_BRUTE
49871+ bool "Deter exploit bruteforcing"
49872+ help
49873+ If you say Y here, attempts to bruteforce exploits against forking
49874+ daemons such as apache or sshd, as well as against suid/sgid binaries
49875+ will be deterred. When a child of a forking daemon is killed by PaX
49876+ or crashes due to an illegal instruction or other suspicious signal,
49877+ the parent process will be delayed 30 seconds upon every subsequent
49878+ fork until the administrator is able to assess the situation and
49879+ restart the daemon.
49880+ In the suid/sgid case, the attempt is logged, the user has all their
49881+ processes terminated, and they are prevented from executing any further
49882+ processes for 15 minutes.
49883+ It is recommended that you also enable signal logging in the auditing
49884+ section so that logs are generated when a process triggers a suspicious
49885+ signal.
49886+ If the sysctl option is enabled, a sysctl option with name
49887+ "deter_bruteforce" is created.
58c5fc13 49888+
58c5fc13 49889+
fe2de317
MT
49890+config GRKERNSEC_MODHARDEN
49891+ bool "Harden module auto-loading"
49892+ depends on MODULES
49893+ help
49894+ If you say Y here, module auto-loading in response to use of some
49895+ feature implemented by an unloaded module will be restricted to
49896+ root users. Enabling this option helps defend against attacks
49897+ by unprivileged users who abuse the auto-loading behavior to
49898+ cause a vulnerable module to load that is then exploited.
58c5fc13 49899+
fe2de317
MT
49900+ If this option prevents a legitimate use of auto-loading for a
49901+ non-root user, the administrator can execute modprobe manually
49902+ with the exact name of the module mentioned in the alert log.
49903+ Alternatively, the administrator can add the module to the list
49904+ of modules loaded at boot by modifying init scripts.
58c5fc13 49905+
fe2de317
MT
49906+ Modification of init scripts will most likely be needed on
49907+ Ubuntu servers with encrypted home directory support enabled,
49908+ as the first non-root user logging in will cause the ecb(aes),
49909+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
58c5fc13 49910+
fe2de317
MT
49911+config GRKERNSEC_HIDESYM
49912+ bool "Hide kernel symbols"
49913+ help
49914+ If you say Y here, getting information on loaded modules, and
49915+ displaying all kernel symbols through a syscall will be restricted
49916+ to users with CAP_SYS_MODULE. For software compatibility reasons,
49917+ /proc/kallsyms will be restricted to the root user. The RBAC
49918+ system can hide that entry even from root.
58c5fc13 49919+
fe2de317
MT
49920+ This option also prevents leaking of kernel addresses through
49921+ several /proc entries.
58c5fc13 49922+
fe2de317
MT
49923+ Note that this option is only effective provided the following
49924+ conditions are met:
49925+ 1) The kernel using grsecurity is not precompiled by some distribution
49926+ 2) You have also enabled GRKERNSEC_DMESG
49927+ 3) You are using the RBAC system and hiding other files such as your
49928+ kernel image and System.map. Alternatively, enabling this option
49929+ causes the permissions on /boot, /lib/modules, and the kernel
49930+ source directory to change at compile time to prevent
49931+ reading by non-root users.
49932+ If the above conditions are met, this option will aid in providing a
49933+ useful protection against local kernel exploitation of overflows
49934+ and arbitrary read/write vulnerabilities.
58c5fc13 49935+
fe2de317
MT
49936+config GRKERNSEC_KERN_LOCKOUT
49937+ bool "Active kernel exploit response"
49938+ depends on X86 || ARM || PPC || SPARC
49939+ help
49940+ If you say Y here, when a PaX alert is triggered due to suspicious
49941+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49942+ or an OOPs occurs due to bad memory accesses, instead of just
49943+ terminating the offending process (and potentially allowing
49944+ a subsequent exploit from the same user), we will take one of two
49945+ actions:
49946+ If the user was root, we will panic the system
49947+ If the user was non-root, we will log the attempt, terminate
49948+ all processes owned by the user, then prevent them from creating
49949+ any new processes until the system is restarted
49950+ This deters repeated kernel exploitation/bruteforcing attempts
49951+ and is useful for later forensics.
ae4e228f 49952+
fe2de317
MT
49953+endmenu
49954+menu "Role Based Access Control Options"
49955+depends on GRKERNSEC
58c5fc13 49956+
fe2de317
MT
49957+config GRKERNSEC_RBAC_DEBUG
49958+ bool
58c5fc13 49959+
fe2de317
MT
49960+config GRKERNSEC_NO_RBAC
49961+ bool "Disable RBAC system"
49962+ help
49963+ If you say Y here, the /dev/grsec device will be removed from the kernel,
49964+ preventing the RBAC system from being enabled. You should only say Y
49965+ here if you have no intention of using the RBAC system, so as to prevent
49966+ an attacker with root access from misusing the RBAC system to hide files
49967+ and processes when loadable module support and /dev/[k]mem have been
49968+ locked down.
58c5fc13 49969+
fe2de317
MT
49970+config GRKERNSEC_ACL_HIDEKERN
49971+ bool "Hide kernel processes"
49972+ help
49973+ If you say Y here, all kernel threads will be hidden from all
49974+ processes but those whose subject has the "view hidden processes"
49975+ flag.
58c5fc13 49976+
fe2de317
MT
49977+config GRKERNSEC_ACL_MAXTRIES
49978+ int "Maximum tries before password lockout"
49979+ default 3
49980+ help
49981+ This option enforces the maximum number of times a user can attempt
49982+ to authorize themselves with the grsecurity RBAC system before being
49983+ denied the ability to attempt authorization again for a specified time.
49984+ The lower the number, the harder it will be to brute-force a password.
58c5fc13 49985+
fe2de317
MT
49986+config GRKERNSEC_ACL_TIMEOUT
49987+ int "Time to wait after max password tries, in seconds"
49988+ default 30
49989+ help
49990+ This option specifies the time the user must wait after attempting to
49991+ authorize to the RBAC system with the maximum number of invalid
49992+ passwords. The higher the number, the harder it will be to brute-force
49993+ a password.
58c5fc13 49994+
fe2de317
MT
49995+endmenu
49996+menu "Filesystem Protections"
49997+depends on GRKERNSEC
58c5fc13 49998+
fe2de317
MT
49999+config GRKERNSEC_PROC
50000+ bool "Proc restrictions"
50001+ help
50002+ If you say Y here, the permissions of the /proc filesystem
50003+ will be altered to enhance system security and privacy. You MUST
50004+ choose either a user only restriction or a user and group restriction.
50005+ Depending upon the option you choose, you can either restrict users to
50006+ see only the processes they themselves run, or choose a group that can
50007+ view all processes and files normally restricted to root if you choose
50008+ the "restrict to user only" option. NOTE: If you're running identd as
50009+ a non-root user, you will have to run it as the group you specify here.
58c5fc13 50010+
fe2de317
MT
50011+config GRKERNSEC_PROC_USER
50012+ bool "Restrict /proc to user only"
50013+ depends on GRKERNSEC_PROC
50014+ help
50015+ If you say Y here, non-root users will only be able to view their own
50016+ processes, and restricts them from viewing network-related information,
50017+ and viewing kernel symbol and module information.
58c5fc13 50018+
fe2de317
MT
50019+config GRKERNSEC_PROC_USERGROUP
50020+ bool "Allow special group"
50021+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50022+ help
50023+ If you say Y here, you will be able to select a group that will be
50024+ able to view all processes and network-related information. If you've
50025+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50026+ remain hidden. This option is useful if you want to run identd as
50027+ a non-root user.
58c5fc13 50028+
fe2de317
MT
50029+config GRKERNSEC_PROC_GID
50030+ int "GID for special group"
50031+ depends on GRKERNSEC_PROC_USERGROUP
50032+ default 1001
df50ba0c 50033+
fe2de317
MT
50034+config GRKERNSEC_PROC_ADD
50035+ bool "Additional restrictions"
50036+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50037+ help
50038+ If you say Y here, additional restrictions will be placed on
50039+ /proc that keep normal users from viewing device information and
50040+ slabinfo information that could be useful for exploits.
58c5fc13 50041+
fe2de317
MT
50042+config GRKERNSEC_LINK
50043+ bool "Linking restrictions"
50044+ help
50045+ If you say Y here, /tmp race exploits will be prevented, since users
50046+ will no longer be able to follow symlinks owned by other users in
50047+ world-writable +t directories (e.g. /tmp), unless the owner of the
50048+ symlink is the owner of the directory. Users will also not be
50049+ able to hardlink to files they do not own. If the sysctl option is
50050+ enabled, a sysctl option with name "linking_restrictions" is created.
15a11c5b 50051+
fe2de317
MT
50052+config GRKERNSEC_FIFO
50053+ bool "FIFO restrictions"
50054+ help
50055+ If you say Y here, users will not be able to write to FIFOs they don't
50056+ own in world-writable +t directories (e.g. /tmp), unless the owner of
50057+ the FIFO is also the owner of the directory it's held in. If the sysctl
50058+ option is enabled, a sysctl option with name "fifo_restrictions" is
50059+ created.
58c5fc13 50060+
fe2de317
MT
50061+config GRKERNSEC_SYSFS_RESTRICT
50062+ bool "Sysfs/debugfs restriction"
50063+ depends on SYSFS
50064+ help
50065+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50066+ any filesystem normally mounted under it (e.g. debugfs) will only
50067+ be accessible by root. These filesystems generally provide access
50068+ to hardware and debug information that isn't appropriate for unprivileged
50069+ users of the system. Sysfs and debugfs have also become a large source
50070+ of new vulnerabilities, ranging from infoleaks to local compromise.
50071+ There has been very little oversight with an eye toward security involved
50072+ in adding new exporters of information to these filesystems, so their
50073+ use is discouraged.
50074+ This option is equivalent to a chmod 0700 of the mount paths.
df50ba0c 50075+
fe2de317
MT
50076+config GRKERNSEC_ROFS
50077+ bool "Runtime read-only mount protection"
50078+ help
50079+ If you say Y here, a sysctl option with name "romount_protect" will
50080+ be created. By setting this option to 1 at runtime, filesystems
50081+ will be protected in the following ways:
50082+ * No new writable mounts will be allowed
50083+ * Existing read-only mounts won't be able to be remounted read/write
50084+ * Write operations will be denied on all block devices
50085+ This option acts independently of grsec_lock: once it is set to 1,
50086+ it cannot be turned off. Therefore, please be mindful of the resulting
50087+ behavior if this option is enabled in an init script on a read-only
50088+ filesystem. This feature is mainly intended for secure embedded systems.
58c5fc13 50089+
fe2de317
MT
50090+config GRKERNSEC_CHROOT
50091+ bool "Chroot jail restrictions"
50092+ help
50093+ If you say Y here, you will be able to choose several options that will
50094+ make breaking out of a chrooted jail much more difficult. If you
50095+ encounter no software incompatibilities with the following options, it
50096+ is recommended that you enable each one.
58c5fc13 50097+
fe2de317
MT
50098+config GRKERNSEC_CHROOT_MOUNT
50099+ bool "Deny mounts"
50100+ depends on GRKERNSEC_CHROOT
50101+ help
50102+ If you say Y here, processes inside a chroot will not be able to
50103+ mount or remount filesystems. If the sysctl option is enabled, a
50104+ sysctl option with name "chroot_deny_mount" is created.
58c5fc13 50105+
fe2de317
MT
50106+config GRKERNSEC_CHROOT_DOUBLE
50107+ bool "Deny double-chroots"
50108+ depends on GRKERNSEC_CHROOT
50109+ help
50110+ If you say Y here, processes inside a chroot will not be able to chroot
50111+ again outside the chroot. This is a widely used method of breaking
50112+ out of a chroot jail and should not be allowed. If the sysctl
50113+ option is enabled, a sysctl option with name
50114+ "chroot_deny_chroot" is created.
16454cff 50115+
fe2de317
MT
50116+config GRKERNSEC_CHROOT_PIVOT
50117+ bool "Deny pivot_root in chroot"
50118+ depends on GRKERNSEC_CHROOT
50119+ help
50120+ If you say Y here, processes inside a chroot will not be able to use
50121+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50122+ works similarly to chroot in that it changes the root filesystem. This
50123+ function could be misused in a chrooted process to attempt to break out
50124+ of the chroot, and therefore should not be allowed. If the sysctl
50125+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50126+ created.
16454cff 50127+
fe2de317
MT
50128+config GRKERNSEC_CHROOT_CHDIR
50129+ bool "Enforce chdir(\"/\") on all chroots"
50130+ depends on GRKERNSEC_CHROOT
50131+ help
50132+ If you say Y here, the current working directory of all newly-chrooted
50133+ applications will be set to the root directory of the chroot.
50134+ The man page on chroot(2) states:
50135+ Note that this call does not change the current working
50136+ directory, so that `.' can be outside the tree rooted at
50137+ `/'. In particular, the super-user can escape from a
50138+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
16454cff 50139+
fe2de317
MT
50140+ It is recommended that you say Y here, since it's not known to break
50141+ any software. If the sysctl option is enabled, a sysctl option with
50142+ name "chroot_enforce_chdir" is created.
58c5fc13 50143+
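
GRKERNSEC_CHROOT_CHDIR enforces in the kernel the idiom careful programs already follow by hand: chdir() to the new root right after chroot(), so "." can never refer to a directory outside the jail. A short sketch of that idiom, using an illustrative jail path; with the option enabled, the chdir("/") step happens even if a program omits it:

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *jail = "/var/empty";   /* illustrative jail directory        */

    if (chroot(jail) != 0) {           /* needs CAP_SYS_CHROOT               */
        perror("chroot");
        return EXIT_FAILURE;
    }
    if (chdir("/") != 0) {             /* the step this option enforces      */
        perror("chdir");
        return EXIT_FAILURE;
    }
    /* From here on, "." and "/" both resolve inside the jail.               */
    execl("/bin/sh", "sh", (char *)NULL);
    perror("execl");
    return EXIT_FAILURE;
}
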
fe2de317
MT
50144+config GRKERNSEC_CHROOT_CHMOD
50145+ bool "Deny (f)chmod +s"
50146+ depends on GRKERNSEC_CHROOT
50147+ help
50148+ If you say Y here, processes inside a chroot will not be able to chmod
50149+ or fchmod files to make them have suid or sgid bits. This protects
50150+ against another published method of breaking a chroot. If the sysctl
50151+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50152+ created.
58c5fc13 50153+
fe2de317
MT
50154+config GRKERNSEC_CHROOT_FCHDIR
50155+ bool "Deny fchdir out of chroot"
50156+ depends on GRKERNSEC_CHROOT
50157+ help
50158+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50159+ to a file descriptor of the chrooting process that points to a directory
50160+ outside the filesystem will be stopped. If the sysctl option
50161+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
6892158b 50162+
fe2de317
MT
50163+config GRKERNSEC_CHROOT_MKNOD
50164+ bool "Deny mknod"
50165+ depends on GRKERNSEC_CHROOT
50166+ help
50167+ If you say Y here, processes inside a chroot will not be allowed to
50168+ mknod. The problem with using mknod inside a chroot is that it
50169+ would allow an attacker to create a device entry that is the same
50170+ as one on the physical root of your system, which could be anything
50171+ from the console device to a device for your hard drive (which
50172+ they could then use to wipe the drive or steal data). It is recommended
50173+ that you say Y here, unless you run into software incompatibilities.
50174+ If the sysctl option is enabled, a sysctl option with name
50175+ "chroot_deny_mknod" is created.
58c5fc13 50176+
fe2de317
MT
50177+config GRKERNSEC_CHROOT_SHMAT
50178+ bool "Deny shmat() out of chroot"
50179+ depends on GRKERNSEC_CHROOT
50180+ help
50181+ If you say Y here, processes inside a chroot will not be able to attach
50182+ to shared memory segments that were created outside of the chroot jail.
50183+ It is recommended that you say Y here. If the sysctl option is enabled,
50184+ a sysctl option with name "chroot_deny_shmat" is created.
58c5fc13 50185+
fe2de317
MT
50186+config GRKERNSEC_CHROOT_UNIX
50187+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50188+ depends on GRKERNSEC_CHROOT
50189+ help
50190+ If you say Y here, processes inside a chroot will not be able to
50191+ connect to abstract (meaning not belonging to a filesystem) Unix
50192+ domain sockets that were bound outside of a chroot. It is recommended
50193+ that you say Y here. If the sysctl option is enabled, a sysctl option
50194+ with name "chroot_deny_unix" is created.
58c5fc13 50195+
fe2de317
MT
50196+config GRKERNSEC_CHROOT_FINDTASK
50197+ bool "Protect outside processes"
50198+ depends on GRKERNSEC_CHROOT
50199+ help
50200+ If you say Y here, processes inside a chroot will not be able to
50201+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50202+ getsid, or view any process outside of the chroot. If the sysctl
50203+ option is enabled, a sysctl option with name "chroot_findtask" is
50204+ created.
58c5fc13 50205+
fe2de317
MT
50206+config GRKERNSEC_CHROOT_NICE
50207+ bool "Restrict priority changes"
50208+ depends on GRKERNSEC_CHROOT
50209+ help
50210+ If you say Y here, processes inside a chroot will not be able to raise
50211+ the priority of processes in the chroot, or alter the priority of
50212+ processes outside the chroot. This provides more security than simply
50213+ removing CAP_SYS_NICE from the process' capability set. If the
50214+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50215+ is created.
bc901d79 50216+
fe2de317
MT
50217+config GRKERNSEC_CHROOT_SYSCTL
50218+ bool "Deny sysctl writes"
50219+ depends on GRKERNSEC_CHROOT
50220+ help
50221+ If you say Y here, an attacker in a chroot will not be able to
50222+ write to sysctl entries, either by sysctl(2) or through a /proc
50223+ interface. It is strongly recommended that you say Y here. If the
50224+ sysctl option is enabled, a sysctl option with name
50225+ "chroot_deny_sysctl" is created.
bc901d79 50226+
fe2de317
MT
50227+config GRKERNSEC_CHROOT_CAPS
50228+ bool "Capability restrictions"
50229+ depends on GRKERNSEC_CHROOT
50230+ help
50231+ If you say Y here, the capabilities on all processes within a
50232+ chroot jail will be lowered to stop module insertion, raw i/o,
50233+ system and net admin tasks, rebooting the system, modifying immutable
50234+ files, modifying IPC owned by another, and changing the system time.
50235+ This is left as an option because it can break some apps. Disable this
50236+ if your chrooted apps are having problems performing those kinds of
50237+ tasks. If the sysctl option is enabled, a sysctl option with
50238+ name "chroot_caps" is created.
bc901d79 50239+
fe2de317
MT
50240+endmenu
50241+menu "Kernel Auditing"
50242+depends on GRKERNSEC
bc901d79 50243+
fe2de317
MT
50244+config GRKERNSEC_AUDIT_GROUP
50245+ bool "Single group for auditing"
50246+ help
50247+ If you say Y here, the exec, chdir, and (un)mount logging features
50248+ will only operate on a group you specify. This option is recommended
50249+ if you only want to watch certain users instead of having a large
50250+ amount of logs from the entire system. If the sysctl option is enabled,
50251+ a sysctl option with name "audit_group" is created.
bc901d79 50252+
fe2de317
MT
50253+config GRKERNSEC_AUDIT_GID
50254+ int "GID for auditing"
50255+ depends on GRKERNSEC_AUDIT_GROUP
50256+ default 1007
bc901d79 50257+
fe2de317
MT
50258+config GRKERNSEC_EXECLOG
50259+ bool "Exec logging"
50260+ help
50261+ If you say Y here, all execve() calls will be logged (since the
50262+ other exec*() calls are frontends to execve(), all execution
50263+ will be logged). Useful for shell-servers that like to keep track
50264+ of their users. If the sysctl option is enabled, a sysctl option with
50265+ name "exec_logging" is created.
50266+ WARNING: This option when enabled will produce a LOT of logs, especially
50267+ on an active system.
bc901d79 50268+
fe2de317
MT
50269+config GRKERNSEC_RESLOG
50270+ bool "Resource logging"
50271+ help
50272+ If you say Y here, all attempts to overstep resource limits will
50273+ be logged with the resource name, the requested size, and the current
50274+ limit. It is highly recommended that you say Y here. If the sysctl
50275+ option is enabled, a sysctl option with name "resource_logging" is
50276+ created. If the RBAC system is enabled, the sysctl value is ignored.
bc901d79 50277+
fe2de317
MT
50278+config GRKERNSEC_CHROOT_EXECLOG
50279+ bool "Log execs within chroot"
50280+ help
50281+ If you say Y here, all executions inside a chroot jail will be logged
50282+ to syslog. This can cause a large amount of logs if certain
50283+ applications (eg. djb's daemontools) are installed on the system, and
50284+ applications (e.g. djb's daemontools) are installed on the system, and
50285+ sysctl option with name "chroot_execlog" is created.
bc901d79 50286+
fe2de317
MT
50287+config GRKERNSEC_AUDIT_PTRACE
50288+ bool "Ptrace logging"
50289+ help
50290+ If you say Y here, all attempts to attach to a process via ptrace
50291+ will be logged. If the sysctl option is enabled, a sysctl option
50292+ with name "audit_ptrace" is created.
bc901d79 50293+
fe2de317
MT
50294+config GRKERNSEC_AUDIT_CHDIR
50295+ bool "Chdir logging"
50296+ help
50297+ If you say Y here, all chdir() calls will be logged. If the sysctl
50298+ option is enabled, a sysctl option with name "audit_chdir" is created.
bc901d79 50299+
fe2de317
MT
50300+config GRKERNSEC_AUDIT_MOUNT
50301+ bool "(Un)Mount logging"
50302+ help
50303+ If you say Y here, all mounts and unmounts will be logged. If the
50304+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50305+ created.
bc901d79 50306+
fe2de317
MT
50307+config GRKERNSEC_SIGNAL
50308+ bool "Signal logging"
50309+ help
50310+ If you say Y here, certain important signals will be logged, such as
50311+ SIGSEGV, which will as a result inform you when an error in a program
50312+ occurred, which in some cases could mean a possible exploit attempt.
50313+ If the sysctl option is enabled, a sysctl option with name
50314+ "signal_logging" is created.
58c5fc13 50315+
fe2de317
MT
50316+config GRKERNSEC_FORKFAIL
50317+ bool "Fork failure logging"
50318+ help
50319+ If you say Y here, all failed fork() attempts will be logged.
50320+ This could suggest a fork bomb, or someone attempting to overstep
50321+ their process limit. If the sysctl option is enabled, a sysctl option
50322+ with name "forkfail_logging" is created.
58c5fc13 50323+
fe2de317
MT
50324+config GRKERNSEC_TIME
50325+ bool "Time change logging"
50326+ help
50327+ If you say Y here, any changes of the system clock will be logged.
50328+ If the sysctl option is enabled, a sysctl option with name
50329+ "timechange_logging" is created.
58c5fc13 50330+
fe2de317
MT
50331+config GRKERNSEC_PROC_IPADDR
50332+ bool "/proc/<pid>/ipaddr support"
50333+ help
50334+ If you say Y here, a new entry will be added to each /proc/<pid>
50335+ directory that contains the IP address of the person using the task.
50336+ The IP is carried across local TCP and AF_UNIX stream sockets.
50337+ This information can be useful for IDS/IPSes to perform remote response
50338+ to a local attack. The entry is readable by only the owner of the
50339+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50340+ the RBAC system), and thus does not create privacy concerns.
58c5fc13 50341+
fe2de317
MT
50342+config GRKERNSEC_RWXMAP_LOG
50343+ bool 'Denied RWX mmap/mprotect logging'
50344+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50345+ help
50346+ If you say Y here, calls to mmap() and mprotect() with explicit
50347+ usage of PROT_WRITE and PROT_EXEC together will be logged when
50348+ denied by the PAX_MPROTECT feature. If the sysctl option is
50349+ enabled, a sysctl option with name "rwxmap_logging" is created.
6892158b 50350+
fe2de317
MT
50351+config GRKERNSEC_AUDIT_TEXTREL
50352+ bool 'ELF text relocations logging (READ HELP)'
50353+ depends on PAX_MPROTECT
50354+ help
50355+ If you say Y here, text relocations will be logged with the filename
50356+ of the offending library or binary. The purpose of the feature is
50357+ to help Linux distribution developers get rid of libraries and
50358+ binaries that need text relocations which hinder the future progress
50359+ of PaX. Only Linux distribution developers should say Y here, and
50360+ never on a production machine, as this option creates an information
50361+ leak that could aid an attacker in defeating the randomization of
50362+ a single memory region. If the sysctl option is enabled, a sysctl
50363+ option with name "audit_textrel" is created.
58c5fc13 50364+
fe2de317 50365+endmenu
58c5fc13 50366+
fe2de317
MT
50367+menu "Executable Protections"
50368+depends on GRKERNSEC
58c5fc13 50369+
fe2de317
MT
50370+config GRKERNSEC_DMESG
50371+ bool "Dmesg(8) restriction"
50372+ help
50373+ If you say Y here, non-root users will not be able to use dmesg(8)
50374+ to view up to the last 4kb of messages in the kernel's log buffer.
50375+ The kernel's log buffer often contains kernel addresses and other
50376+ identifying information useful to an attacker in fingerprinting a
50377+ system for a targeted exploit.
50378+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
50379+ created.
6892158b 50380+
fe2de317
MT
50381+config GRKERNSEC_HARDEN_PTRACE
50382+ bool "Deter ptrace-based process snooping"
50383+ help
50384+ If you say Y here, TTY sniffers and other malicious monitoring
50385+ programs implemented through ptrace will be defeated. If you
50386+ have been using the RBAC system, this option has already been
50387+ enabled for several years for all users, with the ability to make
50388+ fine-grained exceptions.
58c5fc13 50389+
fe2de317
MT
50390+ This option only affects the ability of non-root users to ptrace
50391+ processes that are not a descendent of the ptracing process.
50392+ This means that strace ./binary and gdb ./binary will still work,
50393+ but attaching to arbitrary processes will not. If the sysctl
50394+ option is enabled, a sysctl option with name "harden_ptrace" is
50395+ created.
58c5fc13 50396+
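
Under GRKERNSEC_HARDEN_PTRACE, tracing a child you spawned (strace ./binary, gdb ./binary) keeps working, while PTRACE_ATTACH to an unrelated process fails for unprivileged users. A minimal sketch of the attach path the option restricts; on a kernel with harden_ptrace enabled, the attach below would fail with EPERM unless the target is a descendant or the caller is root:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    pid_t pid;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <pid>\n", argv[0]);
        return EXIT_FAILURE;
    }
    pid = (pid_t)atoi(argv[1]);

    /* Attaching to a process that is not our descendant is what
     * harden_ptrace denies for unprivileged users. */
    if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1) {
        perror("ptrace(PTRACE_ATTACH)");
        return EXIT_FAILURE;
    }
    waitpid(pid, NULL, 0);                  /* wait for the attach stop */
    ptrace(PTRACE_DETACH, pid, NULL, NULL);
    puts("attached and detached");
    return 0;
}
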
fe2de317
MT
50397+config GRKERNSEC_TPE
50398+ bool "Trusted Path Execution (TPE)"
50399+ help
50400+ If you say Y here, you will be able to choose a gid to add to the
50401+ supplementary groups of users you want to mark as "untrusted."
50402+ These users will not be able to execute any files that are not in
50403+ root-owned directories writable only by root. If the sysctl option
50404+ is enabled, a sysctl option with name "tpe" is created.
58c5fc13 50405+
fe2de317
MT
50406+config GRKERNSEC_TPE_ALL
50407+ bool "Partially restrict all non-root users"
50408+ depends on GRKERNSEC_TPE
50409+ help
50410+ If you say Y here, all non-root users will be covered under
50411+ a weaker TPE restriction. This is separate from, and in addition to,
50412+ the main TPE options that you have selected elsewhere. Thus, if a
50413+ "trusted" GID is chosen, this restriction applies even to that GID.
50414+ Under this restriction, all non-root users will only be allowed to
50415+ execute files in directories they own that are not group or
50416+ world-writable, or in directories owned by root and writable only by
50417+ root. If the sysctl option is enabled, a sysctl option with name
50418+ "tpe_restrict_all" is created.
58c5fc13 50419+
fe2de317
MT
50420+config GRKERNSEC_TPE_INVERT
50421+ bool "Invert GID option"
50422+ depends on GRKERNSEC_TPE
50423+ help
50424+ If you say Y here, the group you specify in the TPE configuration will
50425+ decide what group TPE restrictions will be *disabled* for. This
50426+ option is useful if you want TPE restrictions to be applied to most
50427+ users on the system. If the sysctl option is enabled, a sysctl option
50428+ with name "tpe_invert" is created. Unlike other sysctl options, this
50429+ entry will default to on for backward-compatibility.
6e9df6a3 50430+
fe2de317
MT
50431+config GRKERNSEC_TPE_GID
50432+ int "GID for untrusted users"
50433+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50434+ default 1005
50435+ help
50436+ Setting this GID determines what group TPE restrictions will be
50437+ *enabled* for. If the sysctl option is enabled, a sysctl option
50438+ with name "tpe_gid" is created.
6e9df6a3 50439+
fe2de317
MT
50440+config GRKERNSEC_TPE_GID
50441+ int "GID for trusted users"
50442+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50443+ default 1005
50444+ help
50445+ Setting this GID determines what group TPE restrictions will be
50446+ *disabled* for. If the sysctl option is enabled, a sysctl option
50447+ with name "tpe_gid" is created.
58c5fc13 50448+
fe2de317
MT
50449+endmenu
50450+menu "Network Protections"
50451+depends on GRKERNSEC
58c5fc13 50452+
fe2de317
MT
50453+config GRKERNSEC_RANDNET
50454+ bool "Larger entropy pools"
50455+ help
50456+ If you say Y here, the entropy pools used for many features of Linux
50457+ and grsecurity will be doubled in size. Since several grsecurity
50458+ features use additional randomness, it is recommended that you say Y
50459+ here. Saying Y here has a similar effect as modifying
50460+ /proc/sys/kernel/random/poolsize.
58c5fc13 50461+
fe2de317
MT
50462+config GRKERNSEC_BLACKHOLE
50463+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50464+ depends on NET
50465+ help
50466+ If you say Y here, neither TCP resets nor ICMP
50467+ destination-unreachable packets will be sent in response to packets
50468+ sent to ports for which no associated listening process exists.
50469+ This feature supports both IPV4 and IPV6 and exempts the
50470+ loopback interface from blackholing. Enabling this feature
50471+ makes a host more resilient to DoS attacks and reduces network
50472+ visibility against scanners.
58c5fc13 50473+
fe2de317
MT
50474+ The blackhole feature as-implemented is equivalent to the FreeBSD
50475+ blackhole feature, as it prevents RST responses to all packets, not
50476+ just SYNs. Under most application behavior this causes no
50477+ problems, but applications (like haproxy) may not close certain
50478+ connections in a way that cleanly terminates them on the remote
50479+ end, leaving the remote host in LAST_ACK state. Because of this
50480+ side-effect and to prevent intentional LAST_ACK DoSes, this
50481+ feature also adds automatic mitigation against such attacks.
50482+ The mitigation drastically reduces the amount of time a socket
50483+ can spend in LAST_ACK state. If you're using haproxy and not
50484+ all servers it connects to have this option enabled, consider
50485+ disabling this feature on the haproxy host.
58c5fc13 50486+
fe2de317
MT
50487+ If the sysctl option is enabled, two sysctl options with names
50488+ "ip_blackhole" and "lastack_retries" will be created.
50489+ While "ip_blackhole" takes the standard zero/non-zero on/off
50490+ toggle, "lastack_retries" uses the same kinds of values as
50491+ "tcp_retries1" and "tcp_retries2". The default value of 4
50492+ prevents a socket from lasting more than 45 seconds in LAST_ACK
50493+ state.
58c5fc13 50494+
fe2de317
MT
50495+config GRKERNSEC_SOCKET
50496+ bool "Socket restrictions"
50497+ depends on NET
50498+ help
50499+ If you say Y here, you will be able to choose from several options.
50500+ If you assign a GID on your system and add it to the supplementary
50501+ groups of users you want to restrict socket access to, this patch
50502+ will perform up to three things, based on the option(s) you choose.
58c5fc13 50503+
fe2de317
MT
50504+config GRKERNSEC_SOCKET_ALL
50505+ bool "Deny any sockets to group"
50506+ depends on GRKERNSEC_SOCKET
50507+ help
50508+ If you say Y here, you will be able to choose a GID whose users will
50509+ be unable to connect to other hosts from your machine or run server
50510+ applications from your machine. If the sysctl option is enabled, a
50511+ sysctl option with name "socket_all" is created.
58c5fc13 50512+
fe2de317
MT
50513+config GRKERNSEC_SOCKET_ALL_GID
50514+ int "GID to deny all sockets for"
50515+ depends on GRKERNSEC_SOCKET_ALL
50516+ default 1004
50517+ help
50518+ Here you can choose the GID to disable socket access for. Remember to
50519+ add the users you want socket access disabled for to the GID
50520+ specified here. If the sysctl option is enabled, a sysctl option
50521+ with name "socket_all_gid" is created.
58c5fc13 50522+
fe2de317
MT
50523+config GRKERNSEC_SOCKET_CLIENT
50524+ bool "Deny client sockets to group"
50525+ depends on GRKERNSEC_SOCKET
50526+ help
50527+ If you say Y here, you will be able to choose a GID whose users will
50528+ be unable to connect to other hosts from your machine, but will be
50529+ able to run servers. If this option is enabled, all users in the group
50530+ you specify will have to use passive mode when initiating ftp transfers
50531+ from the shell on your machine. If the sysctl option is enabled, a
50532+ sysctl option with name "socket_client" is created.
58c5fc13 50533+
fe2de317
MT
50534+config GRKERNSEC_SOCKET_CLIENT_GID
50535+ int "GID to deny client sockets for"
50536+ depends on GRKERNSEC_SOCKET_CLIENT
50537+ default 1003
50538+ help
50539+ Here you can choose the GID to disable client socket access for.
50540+ Remember to add the users you want client socket access disabled for to
50541+ the GID specified here. If the sysctl option is enabled, a sysctl
50542+ option with name "socket_client_gid" is created.
58c5fc13 50543+
fe2de317
MT
50544+config GRKERNSEC_SOCKET_SERVER
50545+ bool "Deny server sockets to group"
50546+ depends on GRKERNSEC_SOCKET
50547+ help
50548+ If you say Y here, you will be able to choose a GID whose users will
50549+ be unable to run server applications from your machine. If the sysctl
50550+ option is enabled, a sysctl option with name "socket_server" is created.
58c5fc13 50551+
fe2de317
MT
50552+config GRKERNSEC_SOCKET_SERVER_GID
50553+ int "GID to deny server sockets for"
50554+ depends on GRKERNSEC_SOCKET_SERVER
50555+ default 1002
50556+ help
50557+ Here you can choose the GID to disable server socket access for.
50558+ Remember to add the users you want server socket access disabled for to
50559+ the GID specified here. If the sysctl option is enabled, a sysctl
50560+ option with name "socket_server_gid" is created.
58c5fc13 50561+
fe2de317
MT
50562+endmenu
50563+menu "Sysctl support"
50564+depends on GRKERNSEC && SYSCTL
58c5fc13 50565+
fe2de317
MT
50566+config GRKERNSEC_SYSCTL
50567+ bool "Sysctl support"
50568+ help
50569+ If you say Y here, you will be able to change the options that
50570+ grsecurity runs with at bootup, without having to recompile your
50571+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50572+ to enable (1) or disable (0) various features. All the sysctl entries
50573+ are mutable until the "grsec_lock" entry is set to a non-zero value.
50574+ All features enabled in the kernel configuration are disabled at boot
50575+ if you do not say Y to the "Turn on features by default" option.
50576+ All options should be set at startup, and the grsec_lock entry should
50577+ be set to a non-zero value after all the options are set.
50578+ *THIS IS EXTREMELY IMPORTANT*
58c5fc13 50579+
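
The help text above describes the runtime interface: every built-in feature appears as a file under /proc/sys/kernel/grsecurity, takes 0/1 writes, and becomes immutable once grsec_lock is set. A minimal sketch of an init-time program using that interface; "dmesg" is one entry name documented elsewhere in this Kconfig, and grsec_lock is written last:

#include <stdio.h>

/* Write a single value to one grsecurity sysctl entry. */
static int grsec_set(const char *entry, int value)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", entry);
    f = fopen(path, "w");
    if (!f) {
        perror(path);
        return -1;
    }
    fprintf(f, "%d\n", value);
    return fclose(f);
}

int main(void)
{
    grsec_set("dmesg", 1);               /* example tunable from GRKERNSEC_DMESG   */
    /* ... set every other desired entry here ...                                  */
    return grsec_set("grsec_lock", 1);   /* must come last: locks all the entries  */
}
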
fe2de317
MT
50580+config GRKERNSEC_SYSCTL_DISTRO
50581+ bool "Extra sysctl support for distro makers (READ HELP)"
50582+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50583+ help
50584+ If you say Y here, additional sysctl options will be created
50585+ for features that affect processes running as root. Therefore,
50586+ it is critical when using this option that the grsec_lock entry be
50587+	  enabled after boot.  Only distros that ship prebuilt kernel packages
50588+	  with this option enabled, and that can ensure grsec_lock is enabled
50589+	  after boot, should use this option.
50590+ *Failure to set grsec_lock after boot makes all grsec features
50591+ this option covers useless*
bc901d79 50592+
fe2de317
MT
50593+ Currently this option creates the following sysctl entries:
50594+ "Disable Privileged I/O": "disable_priv_io"
58c5fc13 50595+
fe2de317
MT
50596+config GRKERNSEC_SYSCTL_ON
50597+ bool "Turn on features by default"
50598+ depends on GRKERNSEC_SYSCTL
50599+ help
50600+	  If you say Y here, the features enabled in the kernel configuration
50601+	  will also be enabled at boot time, instead of starting out disabled.
50602+	  It is recommended that you say Y here unless
50603+ there is some reason you would want all sysctl-tunable features to
50604+ be disabled by default. As mentioned elsewhere, it is important
50605+ to enable the grsec_lock entry once you have finished modifying
50606+ the sysctl entries.
58c5fc13 50607+
fe2de317
MT
50608+endmenu
50609+menu "Logging Options"
50610+depends on GRKERNSEC
58c5fc13 50611+
fe2de317
MT
50612+config GRKERNSEC_FLOODTIME
50613+ int "Seconds in between log messages (minimum)"
50614+ default 10
50615+ help
50616+ This option allows you to enforce the number of seconds between
50617+ grsecurity log messages. The default should be suitable for most
50618+	  people; however, if you choose to change it, choose a value small enough
50619+ to allow informative logs to be produced, but large enough to
50620+ prevent flooding.
58c5fc13 50621+
fe2de317
MT
50622+config GRKERNSEC_FLOODBURST
50623+ int "Number of messages in a burst (maximum)"
50624+ default 6
50625+ help
50626+ This option allows you to choose the maximum number of messages allowed
50627+ within the flood time interval you chose in a separate option. The
50628+	  default should be suitable for most people; however, if you find that
50629+ many of your logs are being interpreted as flooding, you may want to
50630+ raise this value.
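	  To make the interaction of the two options concrete, here is a minimal
	  userspace sketch of the throttling behaviour they configure (at most
	  FLOODBURST messages per FLOODTIME-second window). It illustrates the
	  idea only, it is not grsecurity's logging code, and the function name
	  grsec_log_allowed is made up.

	#include <stdio.h>
	#include <time.h>

	#define FLOODTIME  10	/* seconds per window, the default above */
	#define FLOODBURST  6	/* messages allowed per window, the default above */

	static int grsec_log_allowed(void)
	{
		static time_t window_start;
		static int count;
		time_t now = time(NULL);

		if (now - window_start >= FLOODTIME) {
			/* start a new window and reset the burst counter */
			window_start = now;
			count = 0;
		}
		return count++ < FLOODBURST;
	}

	int main(void)
	{
		int i, logged = 0;

		for (i = 0; i < 100; i++)
			if (grsec_log_allowed())
				logged++;
		printf("%d of 100 messages logged in one window\n", logged);
		return 0;
	}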
58c5fc13 50631+
fe2de317 50632+endmenu
58c5fc13 50633+
fe2de317
MT
50634+endmenu
50635diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50636new file mode 100644
50637index 0000000..be9ae3a
50638--- /dev/null
50639+++ b/grsecurity/Makefile
50640@@ -0,0 +1,36 @@
50641+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50642+# during 2001-2009 it was completely redesigned by Brad Spengler
50643+# into an RBAC system
50644+#
50645+# All code in this directory and various hooks inserted throughout the kernel
50646+# are copyright Brad Spengler - Open Source Security, Inc., and released
50647+# under the GPL v2 or higher
58c5fc13 50648+
fe2de317
MT
50649+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50650+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
50651+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
58c5fc13 50652+
fe2de317
MT
50653+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50654+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50655+ gracl_learn.o grsec_log.o
50656+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
58c5fc13 50657+
fe2de317
MT
50658+ifdef CONFIG_NET
50659+obj-y += grsec_sock.o
50660+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50661+endif
58c5fc13 50662+
fe2de317
MT
50663+ifndef CONFIG_GRKERNSEC
50664+obj-y += grsec_disabled.o
50665+endif
58c5fc13 50666+
fe2de317
MT
50667+ifdef CONFIG_GRKERNSEC_HIDESYM
50668+extra-y := grsec_hidesym.o
50669+$(obj)/grsec_hidesym.o:
50670+ @-chmod -f 500 /boot
50671+ @-chmod -f 500 /lib/modules
50672+ @-chmod -f 500 /lib64/modules
50673+ @-chmod -f 500 /lib32/modules
50674+ @-chmod -f 700 .
50675+ @echo ' grsec: protected kernel image paths'
50676+endif
50677diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50678new file mode 100644
50679index 0000000..09258e0
50680--- /dev/null
50681+++ b/grsecurity/gracl.c
50682@@ -0,0 +1,4156 @@
50683+#include <linux/kernel.h>
50684+#include <linux/module.h>
50685+#include <linux/sched.h>
50686+#include <linux/mm.h>
50687+#include <linux/file.h>
50688+#include <linux/fs.h>
50689+#include <linux/namei.h>
50690+#include <linux/mount.h>
50691+#include <linux/tty.h>
50692+#include <linux/proc_fs.h>
50693+#include <linux/lglock.h>
50694+#include <linux/slab.h>
50695+#include <linux/vmalloc.h>
50696+#include <linux/types.h>
50697+#include <linux/sysctl.h>
50698+#include <linux/netdevice.h>
50699+#include <linux/ptrace.h>
50700+#include <linux/gracl.h>
50701+#include <linux/gralloc.h>
50702+#include <linux/grsecurity.h>
50703+#include <linux/grinternal.h>
50704+#include <linux/pid_namespace.h>
50705+#include <linux/fdtable.h>
50706+#include <linux/percpu.h>
58c5fc13 50707+
fe2de317
MT
50708+#include <asm/uaccess.h>
50709+#include <asm/errno.h>
50710+#include <asm/mman.h>
58c5fc13 50711+
fe2de317
MT
50712+static struct acl_role_db acl_role_set;
50713+static struct name_db name_set;
50714+static struct inodev_db inodev_set;
58c5fc13 50715+
fe2de317
MT
50716+/* for keeping track of userspace pointers used for subjects, so we
50717+ can share references in the kernel as well
50718+*/
58c5fc13 50719+
fe2de317 50720+static struct path real_root;
58c5fc13 50721+
fe2de317 50722+static struct acl_subj_map_db subj_map_set;
58c5fc13 50723+
fe2de317 50724+static struct acl_role_label *default_role;
58c5fc13 50725+
fe2de317 50726+static struct acl_role_label *role_list;
58c5fc13 50727+
fe2de317 50728+static u16 acl_sp_role_value;
58c5fc13 50729+
fe2de317
MT
50730+extern char *gr_shared_page[4];
50731+static DEFINE_MUTEX(gr_dev_mutex);
50732+DEFINE_RWLOCK(gr_inode_lock);
58c5fc13 50733+
fe2de317 50734+struct gr_arg *gr_usermode;
58c5fc13 50735+
fe2de317 50736+static unsigned int gr_status __read_only = GR_STATUS_INIT;
58c5fc13 50737+
fe2de317
MT
50738+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50739+extern void gr_clear_learn_entries(void);
58c5fc13 50740+
fe2de317
MT
50741+#ifdef CONFIG_GRKERNSEC_RESLOG
50742+extern void gr_log_resource(const struct task_struct *task,
50743+ const int res, const unsigned long wanted, const int gt);
50744+#endif
58c5fc13 50745+
fe2de317
MT
50746+unsigned char *gr_system_salt;
50747+unsigned char *gr_system_sum;
58c5fc13 50748+
fe2de317
MT
50749+static struct sprole_pw **acl_special_roles = NULL;
50750+static __u16 num_sprole_pws = 0;
58c5fc13 50751+
fe2de317 50752+static struct acl_role_label *kernel_role = NULL;
58c5fc13 50753+
fe2de317
MT
50754+static unsigned int gr_auth_attempts = 0;
50755+static unsigned long gr_auth_expires = 0UL;
58c5fc13 50756+
fe2de317
MT
50757+#ifdef CONFIG_NET
50758+extern struct vfsmount *sock_mnt;
50759+#endif
58c5fc13 50760+
fe2de317
MT
50761+extern struct vfsmount *pipe_mnt;
50762+extern struct vfsmount *shm_mnt;
50763+#ifdef CONFIG_HUGETLBFS
50764+extern struct vfsmount *hugetlbfs_vfsmount;
50765+#endif
58c5fc13 50766+
fe2de317
MT
50767+static struct acl_object_label *fakefs_obj_rw;
50768+static struct acl_object_label *fakefs_obj_rwx;
58c5fc13 50769+
fe2de317
MT
50770+extern int gr_init_uidset(void);
50771+extern void gr_free_uidset(void);
50772+extern void gr_remove_uid(uid_t uid);
50773+extern int gr_find_uid(uid_t uid);
58c5fc13 50774+
fe2de317 50775+DECLARE_BRLOCK(vfsmount_lock);
58c5fc13 50776+
fe2de317
MT
50777+__inline__ int
50778+gr_acl_is_enabled(void)
57199397 50779+{
fe2de317
MT
50780+ return (gr_status & GR_READY);
50781+}
58c5fc13 50782+
fe2de317
MT
50783+#ifdef CONFIG_BTRFS_FS
50784+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50785+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50786+#endif
58c5fc13 50787+
fe2de317
MT
50788+static inline dev_t __get_dev(const struct dentry *dentry)
50789+{
50790+#ifdef CONFIG_BTRFS_FS
50791+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50792+ return get_btrfs_dev_from_inode(dentry->d_inode);
50793+ else
50794+#endif
50795+ return dentry->d_inode->i_sb->s_dev;
58c5fc13
MT
50796+}
50797+
fe2de317 50798+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58c5fc13 50799+{
fe2de317
MT
50800+ return __get_dev(dentry);
50801+}
58c5fc13 50802+
fe2de317
MT
50803+static char gr_task_roletype_to_char(struct task_struct *task)
50804+{
50805+ switch (task->role->roletype &
50806+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50807+ GR_ROLE_SPECIAL)) {
50808+ case GR_ROLE_DEFAULT:
50809+ return 'D';
50810+ case GR_ROLE_USER:
50811+ return 'U';
50812+ case GR_ROLE_GROUP:
50813+ return 'G';
50814+ case GR_ROLE_SPECIAL:
50815+ return 'S';
50816+ }
58c5fc13 50817+
fe2de317
MT
50818+ return 'X';
50819+}
ae4e228f 50820+
fe2de317
MT
50821+char gr_roletype_to_char(void)
50822+{
50823+ return gr_task_roletype_to_char(current);
58c5fc13 50824+}
efbe55a5 50825+
fe2de317
MT
50826+__inline__ int
50827+gr_acl_tpe_check(void)
efbe55a5 50828+{
fe2de317
MT
50829+ if (unlikely(!(gr_status & GR_READY)))
50830+ return 0;
50831+ if (current->role->roletype & GR_ROLE_TPE)
50832+ return 1;
50833+ else
50834+ return 0;
50835+}
efbe55a5 50836+
fe2de317
MT
50837+int
50838+gr_handle_rawio(const struct inode *inode)
50839+{
50840+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50841+ if (inode && S_ISBLK(inode->i_mode) &&
50842+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50843+ !capable(CAP_SYS_RAWIO))
50844+ return 1;
50845+#endif
50846+ return 0;
50847+}
efbe55a5 50848+
fe2de317
MT
50849+static int
50850+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50851+{
50852+ if (likely(lena != lenb))
50853+ return 0;
efbe55a5 50854+
fe2de317 50855+ return !memcmp(a, b, lena);
efbe55a5
MT
50856+}
50857+
fe2de317 50858+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
efbe55a5 50859+{
fe2de317
MT
50860+ *buflen -= namelen;
50861+ if (*buflen < 0)
50862+ return -ENAMETOOLONG;
50863+ *buffer -= namelen;
50864+ memcpy(*buffer, str, namelen);
50865+ return 0;
50866+}
efbe55a5 50867+
fe2de317
MT
50868+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50869+{
50870+ return prepend(buffer, buflen, name->name, name->len);
efbe55a5 50871+}
fe2de317
MT
50872+
50873+static int prepend_path(const struct path *path, struct path *root,
50874+ char **buffer, int *buflen)
efbe55a5 50875+{
fe2de317
MT
50876+ struct dentry *dentry = path->dentry;
50877+ struct vfsmount *vfsmnt = path->mnt;
50878+ bool slash = false;
50879+ int error = 0;
efbe55a5 50880+
fe2de317
MT
50881+ while (dentry != root->dentry || vfsmnt != root->mnt) {
50882+ struct dentry * parent;
efbe55a5 50883+
fe2de317
MT
50884+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50885+ /* Global root? */
50886+ if (vfsmnt->mnt_parent == vfsmnt) {
50887+ goto out;
50888+ }
50889+ dentry = vfsmnt->mnt_mountpoint;
50890+ vfsmnt = vfsmnt->mnt_parent;
50891+ continue;
50892+ }
50893+ parent = dentry->d_parent;
50894+ prefetch(parent);
50895+ spin_lock(&dentry->d_lock);
50896+ error = prepend_name(buffer, buflen, &dentry->d_name);
50897+ spin_unlock(&dentry->d_lock);
50898+ if (!error)
50899+ error = prepend(buffer, buflen, "/", 1);
50900+ if (error)
50901+ break;
efbe55a5 50902+
fe2de317
MT
50903+ slash = true;
50904+ dentry = parent;
50905+ }
efbe55a5 50906+
fe2de317
MT
50907+out:
50908+ if (!error && !slash)
50909+ error = prepend(buffer, buflen, "/", 1);
efbe55a5 50910+
fe2de317
MT
50911+ return error;
50912+}
efbe55a5 50913+
fe2de317 50914+/* this must be called with vfsmount_lock and rename_lock held */
efbe55a5 50915+
fe2de317
MT
50916+static char *__our_d_path(const struct path *path, struct path *root,
50917+ char *buf, int buflen)
50918+{
50919+ char *res = buf + buflen;
50920+ int error;
efbe55a5 50921+
fe2de317
MT
50922+ prepend(&res, &buflen, "\0", 1);
50923+ error = prepend_path(path, root, &res, &buflen);
50924+ if (error)
50925+ return ERR_PTR(error);
50926+
50927+ return res;
efbe55a5
MT
50928+}
50929+
fe2de317
MT
50930+static char *
50931+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
efbe55a5 50932+{
fe2de317 50933+ char *retval;
efbe55a5 50934+
fe2de317
MT
50935+ retval = __our_d_path(path, root, buf, buflen);
50936+ if (unlikely(IS_ERR(retval)))
50937+ retval = strcpy(buf, "<path too long>");
50938+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50939+ retval[1] = '\0';
efbe55a5 50940+
fe2de317 50941+ return retval;
efbe55a5
MT
50942+}
50943+
fe2de317
MT
50944+static char *
50945+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50946+ char *buf, int buflen)
efbe55a5 50947+{
fe2de317
MT
50948+ struct path path;
50949+ char *res;
efbe55a5 50950+
fe2de317
MT
50951+ path.dentry = (struct dentry *)dentry;
50952+ path.mnt = (struct vfsmount *)vfsmnt;
efbe55a5 50953+
fe2de317
MT
50954+ /* we can use real_root.dentry, real_root.mnt, because this is only called
50955+ by the RBAC system */
50956+ res = gen_full_path(&path, &real_root, buf, buflen);
efbe55a5 50957+
fe2de317 50958+ return res;
efbe55a5
MT
50959+}
50960+
fe2de317
MT
50961+static char *
50962+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50963+ char *buf, int buflen)
efbe55a5 50964+{
fe2de317
MT
50965+ char *res;
50966+ struct path path;
50967+ struct path root;
50968+ struct task_struct *reaper = &init_task;
50969+
50970+ path.dentry = (struct dentry *)dentry;
50971+ path.mnt = (struct vfsmount *)vfsmnt;
50972+
50973+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50974+ get_fs_root(reaper->fs, &root);
50975+
50976+ write_seqlock(&rename_lock);
50977+ br_read_lock(vfsmount_lock);
50978+ res = gen_full_path(&path, &root, buf, buflen);
50979+ br_read_unlock(vfsmount_lock);
50980+ write_sequnlock(&rename_lock);
50981+
50982+ path_put(&root);
50983+ return res;
50984+}
50985+
50986+static char *
50987+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50988+{
50989+ char *ret;
50990+ write_seqlock(&rename_lock);
50991+ br_read_lock(vfsmount_lock);
50992+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50993+ PAGE_SIZE);
50994+ br_read_unlock(vfsmount_lock);
50995+ write_sequnlock(&rename_lock);
50996+ return ret;
50997+}
50998+
50999+static char *
51000+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51001+{
51002+ char *ret;
51003+ char *buf;
51004+ int buflen;
51005+
51006+ write_seqlock(&rename_lock);
51007+ br_read_lock(vfsmount_lock);
51008+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51009+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
51010+ buflen = (int)(ret - buf);
51011+ if (buflen >= 5)
51012+ prepend(&ret, &buflen, "/proc", 5);
51013+ else
51014+ ret = strcpy(buf, "<path too long>");
51015+ br_read_unlock(vfsmount_lock);
51016+ write_sequnlock(&rename_lock);
51017+ return ret;
51018+}
51019+
51020+char *
51021+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
51022+{
51023+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51024+ PAGE_SIZE);
51025+}
51026+
51027+char *
51028+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51029+{
51030+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51031+ PAGE_SIZE);
51032+}
51033+
51034+char *
51035+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51036+{
51037+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51038+ PAGE_SIZE);
51039+}
51040+
51041+char *
51042+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51043+{
51044+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51045+ PAGE_SIZE);
51046+}
51047+
51048+char *
51049+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51050+{
51051+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51052+ PAGE_SIZE);
51053+}
51054+
51055+__inline__ __u32
51056+to_gr_audit(const __u32 reqmode)
51057+{
51058+ /* masks off auditable permission flags, then shifts them to create
51059+ auditing flags, and adds the special case of append auditing if
51060+ we're requesting write */
51061+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51062+}
51063+
51064+struct acl_subject_label *
51065+lookup_subject_map(const struct acl_subject_label *userp)
51066+{
51067+ unsigned int index = shash(userp, subj_map_set.s_size);
51068+ struct subject_map *match;
51069+
51070+ match = subj_map_set.s_hash[index];
51071+
51072+ while (match && match->user != userp)
51073+ match = match->next;
51074+
51075+ if (match != NULL)
51076+ return match->kernel;
51077+ else
51078+ return NULL;
51079+}
51080+
51081+static void
51082+insert_subj_map_entry(struct subject_map *subjmap)
51083+{
51084+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51085+ struct subject_map **curr;
51086+
51087+ subjmap->prev = NULL;
51088+
51089+ curr = &subj_map_set.s_hash[index];
51090+ if (*curr != NULL)
51091+ (*curr)->prev = subjmap;
51092+
51093+ subjmap->next = *curr;
51094+ *curr = subjmap;
51095+
51096+ return;
51097+}
51098+
51099+static struct acl_role_label *
51100+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51101+ const gid_t gid)
51102+{
51103+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51104+ struct acl_role_label *match;
51105+ struct role_allowed_ip *ipp;
51106+ unsigned int x;
51107+ u32 curr_ip = task->signal->curr_ip;
51108+
51109+ task->signal->saved_ip = curr_ip;
51110+
51111+ match = acl_role_set.r_hash[index];
51112+
51113+ while (match) {
51114+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51115+ for (x = 0; x < match->domain_child_num; x++) {
51116+ if (match->domain_children[x] == uid)
51117+ goto found;
51118+ }
51119+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51120+ break;
51121+ match = match->next;
51122+ }
51123+found:
51124+ if (match == NULL) {
51125+ try_group:
51126+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51127+ match = acl_role_set.r_hash[index];
51128+
51129+ while (match) {
51130+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51131+ for (x = 0; x < match->domain_child_num; x++) {
51132+ if (match->domain_children[x] == gid)
51133+ goto found2;
51134+ }
51135+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51136+ break;
51137+ match = match->next;
51138+ }
51139+found2:
51140+ if (match == NULL)
51141+ match = default_role;
51142+ if (match->allowed_ips == NULL)
51143+ return match;
51144+ else {
51145+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51146+ if (likely
51147+ ((ntohl(curr_ip) & ipp->netmask) ==
51148+ (ntohl(ipp->addr) & ipp->netmask)))
51149+ return match;
51150+ }
51151+ match = default_role;
51152+ }
51153+ } else if (match->allowed_ips == NULL) {
51154+ return match;
51155+ } else {
51156+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51157+ if (likely
51158+ ((ntohl(curr_ip) & ipp->netmask) ==
51159+ (ntohl(ipp->addr) & ipp->netmask)))
51160+ return match;
51161+ }
51162+ goto try_group;
51163+ }
51164+
51165+ return match;
51166+}
51167+
51168+struct acl_subject_label *
51169+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51170+ const struct acl_role_label *role)
51171+{
51172+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51173+ struct acl_subject_label *match;
51174+
51175+ match = role->subj_hash[index];
51176+
51177+ while (match && (match->inode != ino || match->device != dev ||
51178+ (match->mode & GR_DELETED))) {
51179+ match = match->next;
51180+ }
51181+
51182+ if (match && !(match->mode & GR_DELETED))
51183+ return match;
51184+ else
51185+ return NULL;
51186+}
51187+
51188+struct acl_subject_label *
51189+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51190+ const struct acl_role_label *role)
51191+{
51192+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51193+ struct acl_subject_label *match;
51194+
51195+ match = role->subj_hash[index];
51196+
51197+ while (match && (match->inode != ino || match->device != dev ||
51198+ !(match->mode & GR_DELETED))) {
51199+ match = match->next;
51200+ }
51201+
51202+ if (match && (match->mode & GR_DELETED))
51203+ return match;
51204+ else
51205+ return NULL;
51206+}
51207+
51208+static struct acl_object_label *
51209+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51210+ const struct acl_subject_label *subj)
51211+{
51212+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51213+ struct acl_object_label *match;
51214+
51215+ match = subj->obj_hash[index];
51216+
51217+ while (match && (match->inode != ino || match->device != dev ||
51218+ (match->mode & GR_DELETED))) {
51219+ match = match->next;
51220+ }
51221+
51222+ if (match && !(match->mode & GR_DELETED))
51223+ return match;
51224+ else
51225+ return NULL;
51226+}
51227+
51228+static struct acl_object_label *
51229+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51230+ const struct acl_subject_label *subj)
51231+{
51232+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51233+ struct acl_object_label *match;
51234+
51235+ match = subj->obj_hash[index];
51236+
51237+ while (match && (match->inode != ino || match->device != dev ||
51238+ !(match->mode & GR_DELETED))) {
51239+ match = match->next;
51240+ }
51241+
51242+ if (match && (match->mode & GR_DELETED))
51243+ return match;
51244+
51245+ match = subj->obj_hash[index];
51246+
51247+ while (match && (match->inode != ino || match->device != dev ||
51248+ (match->mode & GR_DELETED))) {
51249+ match = match->next;
51250+ }
51251+
51252+ if (match && !(match->mode & GR_DELETED))
51253+ return match;
51254+ else
51255+ return NULL;
51256+}
51257+
51258+static struct name_entry *
51259+lookup_name_entry(const char *name)
51260+{
51261+ unsigned int len = strlen(name);
51262+ unsigned int key = full_name_hash(name, len);
51263+ unsigned int index = key % name_set.n_size;
51264+ struct name_entry *match;
51265+
51266+ match = name_set.n_hash[index];
51267+
51268+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51269+ match = match->next;
51270+
51271+ return match;
51272+}
51273+
51274+static struct name_entry *
51275+lookup_name_entry_create(const char *name)
51276+{
51277+ unsigned int len = strlen(name);
51278+ unsigned int key = full_name_hash(name, len);
51279+ unsigned int index = key % name_set.n_size;
51280+ struct name_entry *match;
51281+
51282+ match = name_set.n_hash[index];
51283+
51284+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51285+ !match->deleted))
51286+ match = match->next;
51287+
51288+ if (match && match->deleted)
51289+ return match;
51290+
51291+ match = name_set.n_hash[index];
51292+
51293+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51294+ match->deleted))
51295+ match = match->next;
51296+
51297+ if (match && !match->deleted)
51298+ return match;
51299+ else
51300+ return NULL;
51301+}
51302+
51303+static struct inodev_entry *
51304+lookup_inodev_entry(const ino_t ino, const dev_t dev)
51305+{
51306+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
51307+ struct inodev_entry *match;
51308+
51309+ match = inodev_set.i_hash[index];
51310+
51311+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51312+ match = match->next;
51313+
51314+ return match;
51315+}
51316+
51317+static void
51318+insert_inodev_entry(struct inodev_entry *entry)
51319+{
51320+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51321+ inodev_set.i_size);
51322+ struct inodev_entry **curr;
51323+
51324+ entry->prev = NULL;
51325+
51326+ curr = &inodev_set.i_hash[index];
51327+ if (*curr != NULL)
51328+ (*curr)->prev = entry;
51329+
51330+ entry->next = *curr;
51331+ *curr = entry;
51332+
51333+ return;
51334+}
51335+
51336+static void
51337+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51338+{
51339+ unsigned int index =
51340+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51341+ struct acl_role_label **curr;
51342+ struct acl_role_label *tmp;
51343+
51344+ curr = &acl_role_set.r_hash[index];
51345+
51346+	/* if the role was already inserted due to domains and already has
51347+	   a role attached in the same bucket, then we need to
51348+	   combine these two buckets
51349+ */
51350+ if (role->next) {
51351+ tmp = role->next;
51352+ while (tmp->next)
51353+ tmp = tmp->next;
51354+ tmp->next = *curr;
51355+ } else
51356+ role->next = *curr;
51357+ *curr = role;
51358+
51359+ return;
51360+}
51361+
51362+static void
51363+insert_acl_role_label(struct acl_role_label *role)
51364+{
51365+ int i;
51366+
51367+ if (role_list == NULL) {
51368+ role_list = role;
51369+ role->prev = NULL;
51370+ } else {
51371+ role->prev = role_list;
51372+ role_list = role;
51373+ }
51374+
51375+ /* used for hash chains */
51376+ role->next = NULL;
51377+
51378+ if (role->roletype & GR_ROLE_DOMAIN) {
51379+ for (i = 0; i < role->domain_child_num; i++)
51380+ __insert_acl_role_label(role, role->domain_children[i]);
51381+ } else
51382+ __insert_acl_role_label(role, role->uidgid);
51383+}
51384+
51385+static int
51386+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51387+{
51388+ struct name_entry **curr, *nentry;
51389+ struct inodev_entry *ientry;
51390+ unsigned int len = strlen(name);
51391+ unsigned int key = full_name_hash(name, len);
51392+ unsigned int index = key % name_set.n_size;
51393+
51394+ curr = &name_set.n_hash[index];
51395+
51396+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51397+ curr = &((*curr)->next);
51398+
51399+ if (*curr != NULL)
51400+ return 1;
51401+
51402+ nentry = acl_alloc(sizeof (struct name_entry));
51403+ if (nentry == NULL)
51404+ return 0;
51405+ ientry = acl_alloc(sizeof (struct inodev_entry));
51406+ if (ientry == NULL)
51407+ return 0;
51408+ ientry->nentry = nentry;
51409+
51410+ nentry->key = key;
51411+ nentry->name = name;
51412+ nentry->inode = inode;
51413+ nentry->device = device;
51414+ nentry->len = len;
51415+ nentry->deleted = deleted;
51416+
51417+ nentry->prev = NULL;
51418+ curr = &name_set.n_hash[index];
51419+ if (*curr != NULL)
51420+ (*curr)->prev = nentry;
51421+ nentry->next = *curr;
51422+ *curr = nentry;
51423+
51424+ /* insert us into the table searchable by inode/dev */
51425+ insert_inodev_entry(ientry);
51426+
51427+ return 1;
51428+}
51429+
51430+static void
51431+insert_acl_obj_label(struct acl_object_label *obj,
51432+ struct acl_subject_label *subj)
51433+{
51434+ unsigned int index =
51435+ fhash(obj->inode, obj->device, subj->obj_hash_size);
51436+ struct acl_object_label **curr;
51437+
51438+
51439+ obj->prev = NULL;
51440+
51441+ curr = &subj->obj_hash[index];
51442+ if (*curr != NULL)
51443+ (*curr)->prev = obj;
51444+
51445+ obj->next = *curr;
51446+ *curr = obj;
51447+
51448+ return;
51449+}
51450+
51451+static void
51452+insert_acl_subj_label(struct acl_subject_label *obj,
51453+ struct acl_role_label *role)
51454+{
51455+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51456+ struct acl_subject_label **curr;
51457+
51458+ obj->prev = NULL;
51459+
51460+ curr = &role->subj_hash[index];
51461+ if (*curr != NULL)
51462+ (*curr)->prev = obj;
51463+
51464+ obj->next = *curr;
51465+ *curr = obj;
51466+
51467+ return;
51468+}
51469+
51470+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51471+
51472+static void *
51473+create_table(__u32 * len, int elementsize)
51474+{
51475+ unsigned int table_sizes[] = {
efbe55a5
MT
51476+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51477+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51478+ 4194301, 8388593, 16777213, 33554393, 67108859
51479+ };
51480+ void *newtable = NULL;
51481+ unsigned int pwr = 0;
51482+
51483+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51484+ table_sizes[pwr] <= *len)
51485+ pwr++;
51486+
51487+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51488+ return newtable;
51489+
51490+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51491+ newtable =
51492+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51493+ else
51494+ newtable = vmalloc(table_sizes[pwr] * elementsize);
51495+
51496+ *len = table_sizes[pwr];
51497+
51498+ return newtable;
51499+}
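	Worked example (traced from create_table() above, added here only as
	illustration): a request for 1000 hash slots walks past 7 ... 509 and stops
	at 1021, the first prime in the table larger than the request, so roughly
	1000 chained entries end up in 1021 buckets and the load factor lambda stays
	just under 1, as the comment above intends. A request at or beyond the last
	entry (67108859), or one whose allocation would exceed ULONG_MAX /
	elementsize, makes the function return NULL; tables of at most PAGE_SIZE
	bytes come from kmalloc(), larger ones from vmalloc().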
51500+
51501+static int
51502+init_variables(const struct gr_arg *arg)
51503+{
51504+ struct task_struct *reaper = &init_task;
51505+ unsigned int stacksize;
58c5fc13
MT
51506+
51507+ subj_map_set.s_size = arg->role_db.num_subjects;
51508+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51509+ name_set.n_size = arg->role_db.num_objects;
51510+ inodev_set.i_size = arg->role_db.num_objects;
51511+
51512+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
51513+ !name_set.n_size || !inodev_set.i_size)
51514+ return 1;
51515+
51516+ if (!gr_init_uidset())
51517+ return 1;
51518+
51519+ /* set up the stack that holds allocation info */
51520+
51521+ stacksize = arg->role_db.num_pointers + 5;
51522+
51523+ if (!acl_alloc_stack_init(stacksize))
51524+ return 1;
51525+
51526+ /* grab reference for the real root dentry and vfsmount */
ea610fa8 51527+ get_fs_root(reaper->fs, &real_root);
58c5fc13 51528+
16454cff
MT
51529+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51530+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51531+#endif
51532+
15a11c5b
MT
51533+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51534+ if (fakefs_obj_rw == NULL)
51535+ return 1;
51536+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51537+
51538+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51539+ if (fakefs_obj_rwx == NULL)
58c5fc13 51540+ return 1;
15a11c5b 51541+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
58c5fc13
MT
51542+
51543+ subj_map_set.s_hash =
51544+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51545+ acl_role_set.r_hash =
51546+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51547+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51548+ inodev_set.i_hash =
51549+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51550+
51551+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51552+ !name_set.n_hash || !inodev_set.i_hash)
51553+ return 1;
51554+
51555+ memset(subj_map_set.s_hash, 0,
51556+ sizeof(struct subject_map *) * subj_map_set.s_size);
51557+ memset(acl_role_set.r_hash, 0,
51558+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
51559+ memset(name_set.n_hash, 0,
51560+ sizeof (struct name_entry *) * name_set.n_size);
51561+ memset(inodev_set.i_hash, 0,
51562+ sizeof (struct inodev_entry *) * inodev_set.i_size);
51563+
51564+ return 0;
51565+}
51566+
51567+/* free information not needed after startup
51568+ currently contains user->kernel pointer mappings for subjects
51569+*/
51570+
51571+static void
51572+free_init_variables(void)
51573+{
51574+ __u32 i;
51575+
51576+ if (subj_map_set.s_hash) {
51577+ for (i = 0; i < subj_map_set.s_size; i++) {
51578+ if (subj_map_set.s_hash[i]) {
51579+ kfree(subj_map_set.s_hash[i]);
51580+ subj_map_set.s_hash[i] = NULL;
51581+ }
51582+ }
51583+
51584+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51585+ PAGE_SIZE)
51586+ kfree(subj_map_set.s_hash);
51587+ else
51588+ vfree(subj_map_set.s_hash);
51589+ }
51590+
51591+ return;
51592+}
51593+
51594+static void
51595+free_variables(void)
51596+{
51597+ struct acl_subject_label *s;
51598+ struct acl_role_label *r;
51599+ struct task_struct *task, *task2;
ae4e228f 51600+ unsigned int x;
58c5fc13
MT
51601+
51602+ gr_clear_learn_entries();
51603+
51604+ read_lock(&tasklist_lock);
51605+ do_each_thread(task2, task) {
51606+ task->acl_sp_role = 0;
51607+ task->acl_role_id = 0;
51608+ task->acl = NULL;
51609+ task->role = NULL;
51610+ } while_each_thread(task2, task);
51611+ read_unlock(&tasklist_lock);
51612+
51613+ /* release the reference to the real root dentry and vfsmount */
6892158b 51614+ path_put(&real_root);
58c5fc13
MT
51615+
51616+ /* free all object hash tables */
51617+
ae4e228f 51618+ FOR_EACH_ROLE_START(r)
58c5fc13 51619+ if (r->subj_hash == NULL)
ae4e228f 51620+ goto next_role;
58c5fc13
MT
51621+ FOR_EACH_SUBJECT_START(r, s, x)
51622+ if (s->obj_hash == NULL)
51623+ break;
51624+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51625+ kfree(s->obj_hash);
51626+ else
51627+ vfree(s->obj_hash);
51628+ FOR_EACH_SUBJECT_END(s, x)
51629+ FOR_EACH_NESTED_SUBJECT_START(r, s)
51630+ if (s->obj_hash == NULL)
51631+ break;
51632+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51633+ kfree(s->obj_hash);
51634+ else
51635+ vfree(s->obj_hash);
51636+ FOR_EACH_NESTED_SUBJECT_END(s)
51637+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51638+ kfree(r->subj_hash);
51639+ else
51640+ vfree(r->subj_hash);
51641+ r->subj_hash = NULL;
ae4e228f
MT
51642+next_role:
51643+ FOR_EACH_ROLE_END(r)
58c5fc13
MT
51644+
51645+ acl_free_all();
51646+
51647+ if (acl_role_set.r_hash) {
51648+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51649+ PAGE_SIZE)
51650+ kfree(acl_role_set.r_hash);
51651+ else
51652+ vfree(acl_role_set.r_hash);
51653+ }
51654+ if (name_set.n_hash) {
51655+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
51656+ PAGE_SIZE)
51657+ kfree(name_set.n_hash);
51658+ else
51659+ vfree(name_set.n_hash);
51660+ }
51661+
51662+ if (inodev_set.i_hash) {
51663+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51664+ PAGE_SIZE)
51665+ kfree(inodev_set.i_hash);
51666+ else
51667+ vfree(inodev_set.i_hash);
51668+ }
51669+
51670+ gr_free_uidset();
51671+
51672+ memset(&name_set, 0, sizeof (struct name_db));
51673+ memset(&inodev_set, 0, sizeof (struct inodev_db));
51674+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51675+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51676+
51677+ default_role = NULL;
ae4e228f 51678+ role_list = NULL;
58c5fc13
MT
51679+
51680+ return;
51681+}
51682+
51683+static __u32
51684+count_user_objs(struct acl_object_label *userp)
51685+{
51686+ struct acl_object_label o_tmp;
51687+ __u32 num = 0;
51688+
51689+ while (userp) {
51690+ if (copy_from_user(&o_tmp, userp,
51691+ sizeof (struct acl_object_label)))
51692+ break;
51693+
51694+ userp = o_tmp.prev;
51695+ num++;
51696+ }
51697+
51698+ return num;
51699+}
51700+
51701+static struct acl_subject_label *
51702+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51703+
51704+static int
51705+copy_user_glob(struct acl_object_label *obj)
51706+{
51707+ struct acl_object_label *g_tmp, **guser;
51708+ unsigned int len;
51709+ char *tmp;
51710+
51711+ if (obj->globbed == NULL)
51712+ return 0;
51713+
51714+ guser = &obj->globbed;
51715+ while (*guser) {
51716+ g_tmp = (struct acl_object_label *)
51717+ acl_alloc(sizeof (struct acl_object_label));
51718+ if (g_tmp == NULL)
51719+ return -ENOMEM;
51720+
51721+ if (copy_from_user(g_tmp, *guser,
51722+ sizeof (struct acl_object_label)))
51723+ return -EFAULT;
51724+
51725+ len = strnlen_user(g_tmp->filename, PATH_MAX);
51726+
51727+ if (!len || len >= PATH_MAX)
51728+ return -EINVAL;
51729+
51730+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51731+ return -ENOMEM;
51732+
51733+ if (copy_from_user(tmp, g_tmp->filename, len))
51734+ return -EFAULT;
51735+ tmp[len-1] = '\0';
51736+ g_tmp->filename = tmp;
51737+
51738+ *guser = g_tmp;
51739+ guser = &(g_tmp->next);
51740+ }
51741+
51742+ return 0;
51743+}
51744+
51745+static int
51746+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51747+ struct acl_role_label *role)
51748+{
51749+ struct acl_object_label *o_tmp;
51750+ unsigned int len;
51751+ int ret;
51752+ char *tmp;
51753+
51754+ while (userp) {
51755+ if ((o_tmp = (struct acl_object_label *)
51756+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
51757+ return -ENOMEM;
51758+
51759+ if (copy_from_user(o_tmp, userp,
51760+ sizeof (struct acl_object_label)))
51761+ return -EFAULT;
51762+
51763+ userp = o_tmp->prev;
51764+
51765+ len = strnlen_user(o_tmp->filename, PATH_MAX);
51766+
51767+ if (!len || len >= PATH_MAX)
51768+ return -EINVAL;
51769+
51770+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51771+ return -ENOMEM;
51772+
51773+ if (copy_from_user(tmp, o_tmp->filename, len))
51774+ return -EFAULT;
51775+ tmp[len-1] = '\0';
51776+ o_tmp->filename = tmp;
51777+
51778+ insert_acl_obj_label(o_tmp, subj);
51779+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51780+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51781+ return -ENOMEM;
51782+
51783+ ret = copy_user_glob(o_tmp);
51784+ if (ret)
51785+ return ret;
51786+
51787+ if (o_tmp->nested) {
51788+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51789+ if (IS_ERR(o_tmp->nested))
51790+ return PTR_ERR(o_tmp->nested);
51791+
51792+ /* insert into nested subject list */
51793+ o_tmp->nested->next = role->hash->first;
51794+ role->hash->first = o_tmp->nested;
51795+ }
51796+ }
51797+
51798+ return 0;
51799+}
51800+
51801+static __u32
51802+count_user_subjs(struct acl_subject_label *userp)
51803+{
51804+ struct acl_subject_label s_tmp;
51805+ __u32 num = 0;
51806+
51807+ while (userp) {
51808+ if (copy_from_user(&s_tmp, userp,
51809+ sizeof (struct acl_subject_label)))
51810+ break;
51811+
51812+ userp = s_tmp.prev;
51813+ /* do not count nested subjects against this count, since
51814+ they are not included in the hash table, but are
51815+ attached to objects. We have already counted
51816+ the subjects in userspace for the allocation
51817+ stack
51818+ */
51819+ if (!(s_tmp.mode & GR_NESTED))
51820+ num++;
51821+ }
51822+
51823+ return num;
51824+}
51825+
51826+static int
51827+copy_user_allowedips(struct acl_role_label *rolep)
51828+{
51829+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51830+
51831+ ruserip = rolep->allowed_ips;
51832+
51833+ while (ruserip) {
51834+ rlast = rtmp;
51835+
51836+ if ((rtmp = (struct role_allowed_ip *)
51837+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51838+ return -ENOMEM;
51839+
51840+ if (copy_from_user(rtmp, ruserip,
51841+ sizeof (struct role_allowed_ip)))
51842+ return -EFAULT;
51843+
51844+ ruserip = rtmp->prev;
51845+
51846+ if (!rlast) {
51847+ rtmp->prev = NULL;
51848+ rolep->allowed_ips = rtmp;
51849+ } else {
51850+ rlast->next = rtmp;
51851+ rtmp->prev = rlast;
51852+ }
51853+
51854+ if (!ruserip)
51855+ rtmp->next = NULL;
51856+ }
51857+
51858+ return 0;
51859+}
51860+
51861+static int
51862+copy_user_transitions(struct acl_role_label *rolep)
51863+{
51864+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
51865+
51866+ unsigned int len;
51867+ char *tmp;
51868+
51869+ rusertp = rolep->transitions;
51870+
51871+ while (rusertp) {
51872+ rlast = rtmp;
51873+
51874+ if ((rtmp = (struct role_transition *)
51875+ acl_alloc(sizeof (struct role_transition))) == NULL)
51876+ return -ENOMEM;
51877+
51878+ if (copy_from_user(rtmp, rusertp,
51879+ sizeof (struct role_transition)))
51880+ return -EFAULT;
51881+
51882+ rusertp = rtmp->prev;
51883+
51884+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51885+
51886+ if (!len || len >= GR_SPROLE_LEN)
51887+ return -EINVAL;
51888+
51889+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51890+ return -ENOMEM;
51891+
51892+ if (copy_from_user(tmp, rtmp->rolename, len))
51893+ return -EFAULT;
51894+ tmp[len-1] = '\0';
51895+ rtmp->rolename = tmp;
51896+
51897+ if (!rlast) {
51898+ rtmp->prev = NULL;
51899+ rolep->transitions = rtmp;
51900+ } else {
51901+ rlast->next = rtmp;
51902+ rtmp->prev = rlast;
51903+ }
51904+
51905+ if (!rusertp)
51906+ rtmp->next = NULL;
51907+ }
51908+
51909+ return 0;
51910+}
51911+
51912+static struct acl_subject_label *
51913+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51914+{
51915+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51916+ unsigned int len;
51917+ char *tmp;
51918+ __u32 num_objs;
51919+ struct acl_ip_label **i_tmp, *i_utmp2;
51920+ struct gr_hash_struct ghash;
51921+ struct subject_map *subjmap;
51922+ unsigned int i_num;
51923+ int err;
51924+
51925+ s_tmp = lookup_subject_map(userp);
51926+
51927+ /* we've already copied this subject into the kernel, just return
51928+ the reference to it, and don't copy it over again
51929+ */
51930+ if (s_tmp)
51931+ return(s_tmp);
51932+
51933+ if ((s_tmp = (struct acl_subject_label *)
51934+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51935+ return ERR_PTR(-ENOMEM);
51936+
51937+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51938+ if (subjmap == NULL)
51939+ return ERR_PTR(-ENOMEM);
51940+
51941+ subjmap->user = userp;
51942+ subjmap->kernel = s_tmp;
51943+ insert_subj_map_entry(subjmap);
51944+
51945+ if (copy_from_user(s_tmp, userp,
51946+ sizeof (struct acl_subject_label)))
51947+ return ERR_PTR(-EFAULT);
51948+
51949+ len = strnlen_user(s_tmp->filename, PATH_MAX);
51950+
51951+ if (!len || len >= PATH_MAX)
51952+ return ERR_PTR(-EINVAL);
51953+
51954+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51955+ return ERR_PTR(-ENOMEM);
51956+
51957+ if (copy_from_user(tmp, s_tmp->filename, len))
51958+ return ERR_PTR(-EFAULT);
51959+ tmp[len-1] = '\0';
51960+ s_tmp->filename = tmp;
51961+
51962+ if (!strcmp(s_tmp->filename, "/"))
51963+ role->root_label = s_tmp;
51964+
51965+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51966+ return ERR_PTR(-EFAULT);
51967+
51968+ /* copy user and group transition tables */
51969+
51970+ if (s_tmp->user_trans_num) {
51971+ uid_t *uidlist;
51972+
51973+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51974+ if (uidlist == NULL)
51975+ return ERR_PTR(-ENOMEM);
51976+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51977+ return ERR_PTR(-EFAULT);
51978+
51979+ s_tmp->user_transitions = uidlist;
51980+ }
51981+
51982+ if (s_tmp->group_trans_num) {
51983+ gid_t *gidlist;
51984+
51985+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51986+ if (gidlist == NULL)
51987+ return ERR_PTR(-ENOMEM);
51988+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51989+ return ERR_PTR(-EFAULT);
51990+
51991+ s_tmp->group_transitions = gidlist;
51992+ }
51993+
51994+ /* set up object hash table */
51995+ num_objs = count_user_objs(ghash.first);
51996+
51997+ s_tmp->obj_hash_size = num_objs;
51998+ s_tmp->obj_hash =
51999+ (struct acl_object_label **)
52000+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52001+
52002+ if (!s_tmp->obj_hash)
52003+ return ERR_PTR(-ENOMEM);
52004+
52005+ memset(s_tmp->obj_hash, 0,
52006+ s_tmp->obj_hash_size *
52007+ sizeof (struct acl_object_label *));
52008+
52009+ /* add in objects */
52010+ err = copy_user_objs(ghash.first, s_tmp, role);
52011+
52012+ if (err)
52013+ return ERR_PTR(err);
52014+
52015+ /* set pointer for parent subject */
52016+ if (s_tmp->parent_subject) {
52017+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52018+
52019+ if (IS_ERR(s_tmp2))
52020+ return s_tmp2;
52021+
52022+ s_tmp->parent_subject = s_tmp2;
52023+ }
52024+
52025+ /* add in ip acls */
52026+
52027+ if (!s_tmp->ip_num) {
52028+ s_tmp->ips = NULL;
52029+ goto insert;
52030+ }
52031+
52032+ i_tmp =
52033+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52034+ sizeof (struct acl_ip_label *));
52035+
52036+ if (!i_tmp)
52037+ return ERR_PTR(-ENOMEM);
52038+
52039+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52040+ *(i_tmp + i_num) =
52041+ (struct acl_ip_label *)
52042+ acl_alloc(sizeof (struct acl_ip_label));
52043+ if (!*(i_tmp + i_num))
52044+ return ERR_PTR(-ENOMEM);
52045+
52046+ if (copy_from_user
52047+ (&i_utmp2, s_tmp->ips + i_num,
52048+ sizeof (struct acl_ip_label *)))
52049+ return ERR_PTR(-EFAULT);
52050+
52051+ if (copy_from_user
52052+ (*(i_tmp + i_num), i_utmp2,
52053+ sizeof (struct acl_ip_label)))
52054+ return ERR_PTR(-EFAULT);
52055+
52056+ if ((*(i_tmp + i_num))->iface == NULL)
52057+ continue;
52058+
52059+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52060+ if (!len || len >= IFNAMSIZ)
52061+ return ERR_PTR(-EINVAL);
52062+ tmp = acl_alloc(len);
52063+ if (tmp == NULL)
52064+ return ERR_PTR(-ENOMEM);
52065+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52066+ return ERR_PTR(-EFAULT);
52067+ (*(i_tmp + i_num))->iface = tmp;
52068+ }
52069+
52070+ s_tmp->ips = i_tmp;
52071+
52072+insert:
52073+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52074+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52075+ return ERR_PTR(-ENOMEM);
52076+
52077+ return s_tmp;
52078+}
52079+
52080+static int
52081+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52082+{
52083+ struct acl_subject_label s_pre;
52084+ struct acl_subject_label * ret;
52085+ int err;
52086+
52087+ while (userp) {
52088+ if (copy_from_user(&s_pre, userp,
52089+ sizeof (struct acl_subject_label)))
52090+ return -EFAULT;
52091+
52092+ /* do not add nested subjects here, add
52093+ while parsing objects
52094+ */
52095+
52096+ if (s_pre.mode & GR_NESTED) {
52097+ userp = s_pre.prev;
52098+ continue;
52099+ }
52100+
52101+ ret = do_copy_user_subj(userp, role);
52102+
52103+ err = PTR_ERR(ret);
52104+ if (IS_ERR(ret))
52105+ return err;
52106+
52107+ insert_acl_subj_label(ret, role);
52108+
52109+ userp = s_pre.prev;
52110+ }
52111+
52112+ return 0;
52113+}
52114+
52115+static int
52116+copy_user_acl(struct gr_arg *arg)
52117+{
52118+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52119+ struct sprole_pw *sptmp;
52120+ struct gr_hash_struct *ghash;
52121+ uid_t *domainlist;
52122+ unsigned int r_num;
52123+ unsigned int len;
52124+ char *tmp;
52125+ int err = 0;
52126+ __u16 i;
52127+ __u32 num_subjs;
52128+
52129+ /* we need a default and kernel role */
52130+ if (arg->role_db.num_roles < 2)
52131+ return -EINVAL;
52132+
52133+ /* copy special role authentication info from userspace */
52134+
52135+ num_sprole_pws = arg->num_sprole_pws;
52136+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52137+
52138+ if (!acl_special_roles) {
52139+ err = -ENOMEM;
52140+ goto cleanup;
52141+ }
52142+
52143+ for (i = 0; i < num_sprole_pws; i++) {
52144+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52145+ if (!sptmp) {
52146+ err = -ENOMEM;
52147+ goto cleanup;
52148+ }
52149+ if (copy_from_user(sptmp, arg->sprole_pws + i,
52150+ sizeof (struct sprole_pw))) {
52151+ err = -EFAULT;
52152+ goto cleanup;
52153+ }
52154+
52155+ len =
52156+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52157+
52158+ if (!len || len >= GR_SPROLE_LEN) {
52159+ err = -EINVAL;
52160+ goto cleanup;
52161+ }
52162+
52163+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
52164+ err = -ENOMEM;
52165+ goto cleanup;
52166+ }
52167+
52168+ if (copy_from_user(tmp, sptmp->rolename, len)) {
52169+ err = -EFAULT;
52170+ goto cleanup;
52171+ }
52172+ tmp[len-1] = '\0';
16454cff 52173+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58c5fc13
MT
52174+ printk(KERN_ALERT "Copying special role %s\n", tmp);
52175+#endif
52176+ sptmp->rolename = tmp;
52177+ acl_special_roles[i] = sptmp;
52178+ }
52179+
52180+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52181+
52182+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52183+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
52184+
52185+ if (!r_tmp) {
52186+ err = -ENOMEM;
52187+ goto cleanup;
52188+ }
52189+
52190+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
52191+ sizeof (struct acl_role_label *))) {
52192+ err = -EFAULT;
52193+ goto cleanup;
52194+ }
52195+
52196+ if (copy_from_user(r_tmp, r_utmp2,
52197+ sizeof (struct acl_role_label))) {
52198+ err = -EFAULT;
52199+ goto cleanup;
52200+ }
52201+
52202+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52203+
52204+ if (!len || len >= PATH_MAX) {
52205+ err = -EINVAL;
52206+ goto cleanup;
52207+ }
52208+
52209+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
52210+ err = -ENOMEM;
52211+ goto cleanup;
52212+ }
52213+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
52214+ err = -EFAULT;
52215+ goto cleanup;
52216+ }
52217+ tmp[len-1] = '\0';
52218+ r_tmp->rolename = tmp;
52219+
52220+ if (!strcmp(r_tmp->rolename, "default")
52221+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52222+ default_role = r_tmp;
52223+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52224+ kernel_role = r_tmp;
52225+ }
52226+
52227+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
52228+ err = -ENOMEM;
52229+ goto cleanup;
52230+ }
52231+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
52232+ err = -EFAULT;
52233+ goto cleanup;
52234+ }
52235+
52236+ r_tmp->hash = ghash;
52237+
52238+ num_subjs = count_user_subjs(r_tmp->hash->first);
52239+
52240+ r_tmp->subj_hash_size = num_subjs;
52241+ r_tmp->subj_hash =
52242+ (struct acl_subject_label **)
52243+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52244+
52245+ if (!r_tmp->subj_hash) {
52246+ err = -ENOMEM;
52247+ goto cleanup;
52248+ }
52249+
52250+ err = copy_user_allowedips(r_tmp);
52251+ if (err)
52252+ goto cleanup;
52253+
52254+ /* copy domain info */
52255+ if (r_tmp->domain_children != NULL) {
52256+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52257+ if (domainlist == NULL) {
52258+ err = -ENOMEM;
52259+ goto cleanup;
52260+ }
52261+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
52262+ err = -EFAULT;
52263+ goto cleanup;
52264+ }
52265+ r_tmp->domain_children = domainlist;
52266+ }
52267+
52268+ err = copy_user_transitions(r_tmp);
52269+ if (err)
52270+ goto cleanup;
52271+
52272+ memset(r_tmp->subj_hash, 0,
52273+ r_tmp->subj_hash_size *
52274+ sizeof (struct acl_subject_label *));
52275+
52276+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52277+
52278+ if (err)
52279+ goto cleanup;
52280+
52281+ /* set nested subject list to null */
52282+ r_tmp->hash->first = NULL;
52283+
52284+ insert_acl_role_label(r_tmp);
52285+ }
52286+
52287+ goto return_err;
52288+ cleanup:
52289+ free_variables();
52290+ return_err:
52291+ return err;
52292+
52293+}
52294+
52295+static int
52296+gracl_init(struct gr_arg *args)
52297+{
52298+ int error = 0;
52299+
52300+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52301+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52302+
52303+ if (init_variables(args)) {
52304+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52305+ error = -ENOMEM;
52306+ free_variables();
52307+ goto out;
52308+ }
52309+
52310+ error = copy_user_acl(args);
52311+ free_init_variables();
52312+ if (error) {
52313+ free_variables();
52314+ goto out;
52315+ }
52316+
52317+ if ((error = gr_set_acls(0))) {
52318+ free_variables();
52319+ goto out;
52320+ }
52321+
ae4e228f 52322+ pax_open_kernel();
58c5fc13 52323+ gr_status |= GR_READY;
ae4e228f 52324+ pax_close_kernel();
58c5fc13
MT
52325+
52326+ out:
52327+ return error;
52328+}
52329+
52330+/* derived from glibc fnmatch() 0: match, 1: no match*/
52331+
52332+static int
52333+glob_match(const char *p, const char *n)
52334+{
52335+ char c;
52336+
52337+ while ((c = *p++) != '\0') {
52338+ switch (c) {
52339+ case '?':
52340+ if (*n == '\0')
52341+ return 1;
52342+ else if (*n == '/')
52343+ return 1;
52344+ break;
52345+ case '\\':
52346+ if (*n != c)
52347+ return 1;
52348+ break;
52349+ case '*':
52350+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
52351+ if (*n == '/')
52352+ return 1;
52353+ else if (c == '?') {
52354+ if (*n == '\0')
52355+ return 1;
52356+ else
52357+ ++n;
52358+ }
52359+ }
52360+ if (c == '\0') {
52361+ return 0;
52362+ } else {
52363+ const char *endp;
52364+
52365+ if ((endp = strchr(n, '/')) == NULL)
52366+ endp = n + strlen(n);
52367+
52368+ if (c == '[') {
52369+ for (--p; n < endp; ++n)
52370+ if (!glob_match(p, n))
52371+ return 0;
52372+ } else if (c == '/') {
52373+ while (*n != '\0' && *n != '/')
52374+ ++n;
52375+ if (*n == '/' && !glob_match(p, n + 1))
52376+ return 0;
52377+ } else {
52378+ for (--p; n < endp; ++n)
52379+ if (*n == c && !glob_match(p, n))
52380+ return 0;
52381+ }
52382+
52383+ return 1;
52384+ }
52385+ case '[':
52386+ {
52387+ int not;
52388+ char cold;
52389+
52390+ if (*n == '\0' || *n == '/')
52391+ return 1;
52392+
52393+ not = (*p == '!' || *p == '^');
52394+ if (not)
52395+ ++p;
52396+
52397+ c = *p++;
52398+ for (;;) {
52399+ unsigned char fn = (unsigned char)*n;
52400+
52401+ if (c == '\0')
52402+ return 1;
52403+ else {
52404+ if (c == fn)
52405+ goto matched;
52406+ cold = c;
52407+ c = *p++;
52408+
52409+ if (c == '-' && *p != ']') {
52410+ unsigned char cend = *p++;
52411+
52412+ if (cend == '\0')
52413+ return 1;
52414+
52415+ if (cold <= fn && fn <= cend)
52416+ goto matched;
52417+
52418+ c = *p++;
52419+ }
52420+ }
52421+
52422+ if (c == ']')
52423+ break;
52424+ }
52425+ if (!not)
52426+ return 1;
52427+ break;
52428+ matched:
52429+ while (c != ']') {
52430+ if (c == '\0')
52431+ return 1;
52432+
52433+ c = *p++;
52434+ }
52435+ if (not)
52436+ return 1;
52437+ }
52438+ break;
52439+ default:
52440+ if (c != *n)
52441+ return 1;
52442+ }
52443+
52444+ ++n;
52445+ }
52446+
52447+ if (*n == '\0')
52448+ return 0;
52449+
52450+ if (*n == '/')
52451+ return 0;
52452+
52453+ return 1;
52454+}
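	A few concrete cases, traced from the matcher above and added here purely as
	illustration (0 means match, 1 means no match); note that '*' and '?' never
	match across a '/':

	/* example calls, as they might appear in a caller of glob_match() */
	glob_match("*.txt",  "notes.txt");	/* 0: '*' matches within one path component */
	glob_match("*.txt",  "dir/notes.txt");	/* 1: '*' stops at the '/' */
	glob_match("/tmp/?", "/tmp/a");		/* 0: '?' matches exactly one character */
	glob_match("/tmp/?", "/tmp/ab");	/* 1: the trailing 'b' is left unmatched */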
52455+
52456+static struct acl_object_label *
52457+chk_glob_label(struct acl_object_label *globbed,
52458+ struct dentry *dentry, struct vfsmount *mnt, char **path)
52459+{
52460+ struct acl_object_label *tmp;
52461+
52462+ if (*path == NULL)
52463+ *path = gr_to_filename_nolock(dentry, mnt);
52464+
52465+ tmp = globbed;
52466+
52467+ while (tmp) {
52468+ if (!glob_match(tmp->filename, *path))
52469+ return tmp;
52470+ tmp = tmp->next;
52471+ }
52472+
52473+ return NULL;
52474+}
52475+
52476+static struct acl_object_label *
52477+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52478+ const ino_t curr_ino, const dev_t curr_dev,
52479+ const struct acl_subject_label *subj, char **path, const int checkglob)
52480+{
52481+ struct acl_subject_label *tmpsubj;
52482+ struct acl_object_label *retval;
52483+ struct acl_object_label *retval2;
52484+
52485+ tmpsubj = (struct acl_subject_label *) subj;
52486+ read_lock(&gr_inode_lock);
52487+ do {
52488+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52489+ if (retval) {
52490+ if (checkglob && retval->globbed) {
52491+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
52492+ (struct vfsmount *)orig_mnt, path);
52493+ if (retval2)
52494+ retval = retval2;
52495+ }
52496+ break;
52497+ }
52498+ } while ((tmpsubj = tmpsubj->parent_subject));
52499+ read_unlock(&gr_inode_lock);
52500+
52501+ return retval;
52502+}
52503+
52504+static __inline__ struct acl_object_label *
52505+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
16454cff 52506+ struct dentry *curr_dentry,
58c5fc13
MT
52507+ const struct acl_subject_label *subj, char **path, const int checkglob)
52508+{
bc901d79 52509+ int newglob = checkglob;
16454cff
MT
52510+ ino_t inode;
52511+ dev_t device;
bc901d79
MT
52512+
52513+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52514+ as we don't want a / * rule to match instead of the / object
52515+ don't do this for create lookups that call this function though, since they're looking up
52516+ on the parent and thus need globbing checks on all paths
52517+ */
52518+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52519+ newglob = GR_NO_GLOB;
52520+
16454cff
MT
52521+ spin_lock(&curr_dentry->d_lock);
52522+ inode = curr_dentry->d_inode->i_ino;
52523+ device = __get_dev(curr_dentry);
52524+ spin_unlock(&curr_dentry->d_lock);
52525+
52526+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
58c5fc13
MT
52527+}
52528+
52529+static struct acl_object_label *
52530+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52531+ const struct acl_subject_label *subj, char *path, const int checkglob)
52532+{
52533+ struct dentry *dentry = (struct dentry *) l_dentry;
52534+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52535+ struct acl_object_label *retval;
16454cff 52536+ struct dentry *parent;
58c5fc13 52537+
16454cff 52538+ write_seqlock(&rename_lock);
bc901d79 52539+ br_read_lock(vfsmount_lock);
58c5fc13 52540+
52541+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52542+#ifdef CONFIG_NET
52543+ mnt == sock_mnt ||
52544+#endif
df50ba0c 52545+#ifdef CONFIG_HUGETLBFS
71d190be 52546+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
df50ba0c 52547+#endif
52548+ /* ignore Eric Biederman */
52549+ IS_PRIVATE(l_dentry->d_inode))) {
15a11c5b 52550+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52551+ goto out;
52552+ }
52553+
52554+ for (;;) {
6892158b 52555+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52556+ break;
52557+
52558+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52559+ if (mnt->mnt_parent == mnt)
52560+ break;
52561+
52562+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52563+ if (retval != NULL)
52564+ goto out;
52565+
52566+ dentry = mnt->mnt_mountpoint;
52567+ mnt = mnt->mnt_parent;
52568+ continue;
52569+ }
52570+
16454cff 52571+ parent = dentry->d_parent;
52572+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52573+ if (retval != NULL)
52574+ goto out;
52575+
16454cff 52576+ dentry = parent;
52577+ }
52578+
52579+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52580+
16454cff 52581+ /* real_root is pinned so we don't have to hold a reference */
58c5fc13 52582+ if (retval == NULL)
6892158b 52583+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
58c5fc13 52584+out:
bc901d79 52585+ br_read_unlock(vfsmount_lock);
16454cff 52586+ write_sequnlock(&rename_lock);
52587+
52588+ BUG_ON(retval == NULL);
52589+
52590+ return retval;
52591+}
52592+
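__chk_obj_label() above resolves the most specific label by checking the dentry itself, then each ancestor up through mount points, and finally the real root, so a rule on /home covers /home/user/file when nothing closer matches. A rough userspace sketch of the same longest-matching-ancestor idea over plain path strings (the table and labels are invented for the demo):

/* Illustrative only: longest-matching-ancestor lookup in the spirit of
 * __chk_obj_label(), done on path strings instead of dentry chains. */
#include <stdio.h>
#include <string.h>

static const struct { const char *path; const char *label; } table[] = {
	{ "/home/user/secret", "hidden" },
	{ "/home",             "read-only" },
	{ "/",                 "default" },
};

static const char *lookup_exact(const char *path)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(table[i].path, path))
			return table[i].label;
	return NULL;
}

static const char *chk_label(const char *path)
{
	char buf[256];
	const char *label;

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	for (;;) {
		label = lookup_exact(buf[0] ? buf : "/");
		if (label)
			return label;          /* most specific ancestor wins */
		char *slash = strrchr(buf, '/');
		if (!slash)
			return "default";      /* only hit for relative paths */
		*slash = '\0';                 /* retry with the parent directory */
	}
}

int main(void)
{
	printf("%s\n", chk_label("/home/user/secret"));    /* hidden */
	printf("%s\n", chk_label("/home/user/notes.txt")); /* read-only */
	printf("%s\n", chk_label("/etc/passwd"));          /* default */
	return 0;
}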
52593+static __inline__ struct acl_object_label *
52594+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52595+ const struct acl_subject_label *subj)
52596+{
52597+ char *path = NULL;
bc901d79 52598+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52599+}
52600+
52601+static __inline__ struct acl_object_label *
52602+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52603+ const struct acl_subject_label *subj)
52604+{
52605+ char *path = NULL;
bc901d79 52606+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52607+}
52608+
52609+static __inline__ struct acl_object_label *
52610+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52611+ const struct acl_subject_label *subj, char *path)
52612+{
bc901d79 52613+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52614+}
52615+
52616+static struct acl_subject_label *
52617+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52618+ const struct acl_role_label *role)
52619+{
52620+ struct dentry *dentry = (struct dentry *) l_dentry;
52621+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52622+ struct acl_subject_label *retval;
16454cff 52623+ struct dentry *parent;
58c5fc13 52624+
16454cff 52625+ write_seqlock(&rename_lock);
bc901d79 52626+ br_read_lock(vfsmount_lock);
52627+
52628+ for (;;) {
6892158b 52629+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52630+ break;
52631+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52632+ if (mnt->mnt_parent == mnt)
52633+ break;
52634+
16454cff 52635+ spin_lock(&dentry->d_lock);
52636+ read_lock(&gr_inode_lock);
52637+ retval =
52638+ lookup_acl_subj_label(dentry->d_inode->i_ino,
16454cff 52639+ __get_dev(dentry), role);
58c5fc13 52640+ read_unlock(&gr_inode_lock);
16454cff 52641+ spin_unlock(&dentry->d_lock);
52642+ if (retval != NULL)
52643+ goto out;
52644+
52645+ dentry = mnt->mnt_mountpoint;
52646+ mnt = mnt->mnt_parent;
52647+ continue;
52648+ }
52649+
16454cff 52650+ spin_lock(&dentry->d_lock);
52651+ read_lock(&gr_inode_lock);
52652+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
16454cff 52653+ __get_dev(dentry), role);
58c5fc13 52654+ read_unlock(&gr_inode_lock);
52655+ parent = dentry->d_parent;
52656+ spin_unlock(&dentry->d_lock);
52657+
52658+ if (retval != NULL)
52659+ goto out;
52660+
16454cff 52661+ dentry = parent;
52662+ }
52663+
16454cff 52664+ spin_lock(&dentry->d_lock);
52665+ read_lock(&gr_inode_lock);
52666+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
16454cff 52667+ __get_dev(dentry), role);
58c5fc13 52668+ read_unlock(&gr_inode_lock);
16454cff 52669+ spin_unlock(&dentry->d_lock);
52670+
52671+ if (unlikely(retval == NULL)) {
16454cff 52672+ /* real_root is pinned, we don't need to hold a reference */
58c5fc13 52673+ read_lock(&gr_inode_lock);
6892158b 52674+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
16454cff 52675+ __get_dev(real_root.dentry), role);
52676+ read_unlock(&gr_inode_lock);
52677+ }
52678+out:
bc901d79 52679+ br_read_unlock(vfsmount_lock);
16454cff 52680+ write_sequnlock(&rename_lock);
58c5fc13 52681+
52682+ BUG_ON(retval == NULL);
52683+
52684+ return retval;
52685+}
52686+
52687+static void
52688+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52689+{
52690+ struct task_struct *task = current;
52691+ const struct cred *cred = current_cred();
52692+
52693+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52694+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52695+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
bc901d79 52696+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52697+
52698+ return;
52699+}
52700+
52701+static void
52702+gr_log_learn_sysctl(const char *path, const __u32 mode)
52703+{
52704+ struct task_struct *task = current;
52705+ const struct cred *cred = current_cred();
52706+
52707+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52708+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52709+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
bc901d79 52710+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
52711+
52712+ return;
52713+}
52714+
52715+static void
52716+gr_log_learn_id_change(const char type, const unsigned int real,
52717+ const unsigned int effective, const unsigned int fs)
52718+{
52719+ struct task_struct *task = current;
52720+ const struct cred *cred = current_cred();
52721+
52722+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52723+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52724+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
bc901d79 52725+ type, real, effective, fs, &task->signal->saved_ip);
52726+
52727+ return;
52728+}
52729+
52730+__u32
52731+gr_search_file(const struct dentry * dentry, const __u32 mode,
52732+ const struct vfsmount * mnt)
52733+{
52734+ __u32 retval = mode;
52735+ struct acl_subject_label *curracl;
52736+ struct acl_object_label *currobj;
52737+
52738+ if (unlikely(!(gr_status & GR_READY)))
52739+ return (mode & ~GR_AUDITS);
52740+
52741+ curracl = current->acl;
52742+
52743+ currobj = chk_obj_label(dentry, mnt, curracl);
52744+ retval = currobj->mode & mode;
52745+
52746+ /* if we're opening a specified transfer file for writing
52747+ (e.g. /dev/initctl), then transfer our role to init
52748+ */
52749+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52750+ current->role->roletype & GR_ROLE_PERSIST)) {
52751+ struct task_struct *task = init_pid_ns.child_reaper;
52752+
52753+ if (task->role != current->role) {
52754+ task->acl_sp_role = 0;
52755+ task->acl_role_id = current->acl_role_id;
52756+ task->role = current->role;
52757+ rcu_read_lock();
52758+ read_lock(&grsec_exec_file_lock);
52759+ gr_apply_subject_to_task(task);
52760+ read_unlock(&grsec_exec_file_lock);
52761+ rcu_read_unlock();
52762+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52763+ }
52764+ }
52765+
52766+ if (unlikely
52767+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52768+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52769+ __u32 new_mode = mode;
52770+
52771+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52772+
52773+ retval = new_mode;
52774+
52775+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52776+ new_mode |= GR_INHERIT;
52777+
52778+ if (!(mode & GR_NOLEARN))
52779+ gr_log_learn(dentry, mnt, new_mode);
52780+ }
52781+
52782+ return retval;
52783+}
52784+
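When the subject is in learn or inherit-learn mode and the request is not fully covered, gr_search_file() above grants the stripped request anyway and logs what a rule would need, which is how the learning mode collects policy. A small sketch of that grant-and-record behaviour; the flag values and the printf stand in for the real mode bits and audit log:

/* Illustrative only: "grant and record" behaviour of learning mode, as in
 * gr_search_file(). Flag values and the log sink are demo stand-ins. */
#include <stdio.h>

#define M_READ   0x1
#define M_WRITE  0x2
#define M_AUDIT  0x4      /* audit/suppress style bits, stripped when learning */
#define M_LEARN  0x8

static unsigned int check_access(unsigned int granted, unsigned int requested,
                                 unsigned int subject_mode)
{
	unsigned int retval = granted & requested;

	if ((subject_mode & M_LEARN) && retval != (requested & ~M_AUDIT)) {
		unsigned int new_mode = requested & ~M_AUDIT;

		/* learning: allow the access and note what a rule would need */
		printf("learn: would need mode %#x\n", new_mode);
		return new_mode;
	}
	return retval;
}

int main(void)
{
	/* object only grants READ, task asks for READ|WRITE while learning */
	printf("granted %#x\n", check_access(M_READ, M_READ | M_WRITE, M_LEARN));
	return 0;
}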
52785+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52786+ const struct dentry *parent,
52787+ const struct vfsmount *mnt)
52788+{
52789+ struct name_entry *match;
52790+ struct acl_object_label *matchpo;
52791+ struct acl_subject_label *curracl;
52792+ char *path;
52793+
52794+ if (unlikely(!(gr_status & GR_READY)))
6e9df6a3 52795+ return NULL;
52796+
52797+ preempt_disable();
52798+ path = gr_to_filename_rbac(new_dentry, mnt);
52799+ match = lookup_name_entry_create(path);
52800+
52801+ curracl = current->acl;
52802+
52803+ if (match) {
52804+ read_lock(&gr_inode_lock);
52805+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52806+ read_unlock(&gr_inode_lock);
58c5fc13 52807+
6e9df6a3 52808+ if (matchpo) {
58c5fc13 52809+ preempt_enable();
6e9df6a3 52810+ return matchpo;
58c5fc13 52811+ }
52812+ }
52813+
6e9df6a3 52814+ // lookup parent
52815+
52816+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52817+
52818+ preempt_enable();
52819+ return matchpo;
52820+}
52821+
52822+__u32
52823+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52824+ const struct vfsmount * mnt, const __u32 mode)
52825+{
52826+ struct acl_object_label *matchpo;
52827+ __u32 retval;
52828+
52829+ if (unlikely(!(gr_status & GR_READY)))
52830+ return (mode & ~GR_AUDITS);
52831+
52832+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
52833+
52834+ retval = matchpo->mode & mode;
52835+
52836+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
6e9df6a3 52837+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52838+ __u32 new_mode = mode;
52839+
52840+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52841+
52842+ gr_log_learn(new_dentry, mnt, new_mode);
52843+ return new_mode;
52844+ }
52845+
52846+ return retval;
52847+}
52848+
52849+__u32
52850+gr_check_link(const struct dentry * new_dentry,
52851+ const struct dentry * parent_dentry,
52852+ const struct vfsmount * parent_mnt,
52853+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52854+{
52855+ struct acl_object_label *obj;
52856+ __u32 oldmode, newmode;
52857+ __u32 needmode;
52858+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52859+ GR_DELETE | GR_INHERIT;
52860+
52861+ if (unlikely(!(gr_status & GR_READY)))
52862+ return (GR_CREATE | GR_LINK);
52863+
52864+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52865+ oldmode = obj->mode;
52866+
52867+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52868+ newmode = obj->mode;
52869+
52870+ needmode = newmode & checkmodes;
52871+
52872+ // old name for hardlink must have at least the permissions of the new name
52873+ if ((oldmode & needmode) != needmode)
52874+ goto bad;
52875+
52876+ // if old name had restrictions/auditing, make sure the new name does as well
52877+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52878+
52879+ // don't allow hardlinking of suid/sgid files without permission
52880+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52881+ needmode |= GR_SETID;
52882+
52883+ if ((newmode & needmode) != needmode)
52884+ goto bad;
52885+
52886+ // enforce minimum permissions
52887+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52888+ return newmode;
52889+bad:
52890+ needmode = oldmode;
52891+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52892+ needmode |= GR_SETID;
52893+
52894+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52895+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52896+ return (GR_CREATE | GR_LINK);
52897+ } else if (newmode & GR_SUPPRESS)
52898+ return GR_SUPPRESS;
52899+ else
52900+ return 0;
52901+}
52902+
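gr_check_link() above refuses a hardlink unless the old name already holds every significant permission the new name would grant, with GR_SETID forced into the required set for SUID/SGID targets. The core of it is a subset-of-bits test, sketched below with made-up flag values:

/* Illustrative only: the "required bits must be a subset of granted bits"
 * test used by gr_check_link(). Flag values are invented for the demo. */
#include <stdio.h>

#define F_READ  0x1
#define F_WRITE 0x2
#define F_EXEC  0x4
#define F_SETID 0x8

static int link_allowed(unsigned int oldmode, unsigned int needmode)
{
	/* every bit demanded by the new name must already be present
	   on the old name, otherwise the hardlink is refused */
	return (oldmode & needmode) == needmode;
}

int main(void)
{
	unsigned int oldmode = F_READ | F_EXEC;

	printf("%d\n", link_allowed(oldmode, F_READ));            /* 1: ok */
	printf("%d\n", link_allowed(oldmode, F_READ | F_WRITE));  /* 0: refused */
	printf("%d\n", link_allowed(oldmode | F_SETID,
	                            F_READ | F_SETID));           /* 1: ok */
	return 0;
}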
52903+int
52904+gr_check_hidden_task(const struct task_struct *task)
52905+{
52906+ if (unlikely(!(gr_status & GR_READY)))
52907+ return 0;
52908+
52909+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52910+ return 1;
52911+
52912+ return 0;
52913+}
52914+
52915+int
52916+gr_check_protected_task(const struct task_struct *task)
52917+{
52918+ if (unlikely(!(gr_status & GR_READY) || !task))
52919+ return 0;
52920+
52921+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52922+ task->acl != current->acl)
52923+ return 1;
52924+
52925+ return 0;
52926+}
52927+
52928+int
52929+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52930+{
52931+ struct task_struct *p;
52932+ int ret = 0;
52933+
52934+ if (unlikely(!(gr_status & GR_READY) || !pid))
52935+ return ret;
52936+
52937+ read_lock(&tasklist_lock);
52938+ do_each_pid_task(pid, type, p) {
52939+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52940+ p->acl != current->acl) {
52941+ ret = 1;
52942+ goto out;
52943+ }
52944+ } while_each_pid_task(pid, type, p);
52945+out:
52946+ read_unlock(&tasklist_lock);
52947+
52948+ return ret;
52949+}
52950+
52951+void
52952+gr_copy_label(struct task_struct *tsk)
52953+{
52954+ tsk->signal->used_accept = 0;
52955+ tsk->acl_sp_role = 0;
52956+ tsk->acl_role_id = current->acl_role_id;
52957+ tsk->acl = current->acl;
52958+ tsk->role = current->role;
52959+ tsk->signal->curr_ip = current->signal->curr_ip;
bc901d79 52960+ tsk->signal->saved_ip = current->signal->saved_ip;
52961+ if (current->exec_file)
52962+ get_file(current->exec_file);
52963+ tsk->exec_file = current->exec_file;
52964+ tsk->is_writable = current->is_writable;
bc901d79 52965+ if (unlikely(current->signal->used_accept)) {
58c5fc13 52966+ current->signal->curr_ip = 0;
52967+ current->signal->saved_ip = 0;
52968+ }
52969+
52970+ return;
52971+}
52972+
52973+static void
52974+gr_set_proc_res(struct task_struct *task)
52975+{
52976+ struct acl_subject_label *proc;
52977+ unsigned short i;
52978+
52979+ proc = task->acl;
52980+
52981+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52982+ return;
52983+
52984+ for (i = 0; i < RLIM_NLIMITS; i++) {
52985+ if (!(proc->resmask & (1 << i)))
52986+ continue;
52987+
52988+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52989+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52990+ }
52991+
52992+ return;
52993+}
52994+
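gr_set_proc_res() above applies per-subject resource limits to the task, but only for slots whose bit is set in resmask, and it skips learning subjects so freshly learned limits are not enforced while still being gathered. A compact userspace sketch of the bitmask-gated copy (demo types, not the kernel's rlimit structures):

/* Illustrative only: bitmask-gated limit copy in the spirit of
 * gr_set_proc_res(). The subject/task structs are demo types. */
#include <stdio.h>

#define NLIMITS 4

struct limit { unsigned long cur, max; };

struct subject {
	unsigned short resmask;          /* bit i set => enforce limit i */
	struct limit res[NLIMITS];
};

struct task {
	struct limit rlim[NLIMITS];
};

static void set_proc_res(struct task *t, const struct subject *s)
{
	for (int i = 0; i < NLIMITS; i++) {
		if (!(s->resmask & (1 << i)))
			continue;            /* limit not configured, leave as-is */
		t->rlim[i] = s->res[i];
	}
}

int main(void)
{
	struct subject s = { .resmask = 1 << 2, .res = { [2] = { 1024, 2048 } } };
	struct task t = { .rlim = { [0] = { 111, 222 }, [2] = { 9, 9 } } };

	set_proc_res(&t, &s);
	printf("limit0 %lu/%lu limit2 %lu/%lu\n",
	       t.rlim[0].cur, t.rlim[0].max, t.rlim[2].cur, t.rlim[2].max);
	/* prints: limit0 111/222 limit2 1024/2048 */
	return 0;
}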
52995+extern int __gr_process_user_ban(struct user_struct *user);
52996+
52997+int
52998+gr_check_user_change(int real, int effective, int fs)
52999+{
53000+ unsigned int i;
53001+ __u16 num;
53002+ uid_t *uidlist;
53003+ int curuid;
53004+ int realok = 0;
53005+ int effectiveok = 0;
53006+ int fsok = 0;
53007+
53008+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53009+ struct user_struct *user;
53010+
53011+ if (real == -1)
53012+ goto skipit;
53013+
53014+ user = find_user(real);
53015+ if (user == NULL)
53016+ goto skipit;
53017+
53018+ if (__gr_process_user_ban(user)) {
53019+ /* for find_user */
53020+ free_uid(user);
53021+ return 1;
53022+ }
53023+
53024+ /* for find_user */
53025+ free_uid(user);
53026+
53027+skipit:
53028+#endif
53029+
53030+ if (unlikely(!(gr_status & GR_READY)))
53031+ return 0;
53032+
53033+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53034+ gr_log_learn_id_change('u', real, effective, fs);
53035+
53036+ num = current->acl->user_trans_num;
53037+ uidlist = current->acl->user_transitions;
53038+
53039+ if (uidlist == NULL)
53040+ return 0;
53041+
53042+ if (real == -1)
53043+ realok = 1;
53044+ if (effective == -1)
53045+ effectiveok = 1;
53046+ if (fs == -1)
53047+ fsok = 1;
53048+
53049+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
53050+ for (i = 0; i < num; i++) {
53051+ curuid = (int)uidlist[i];
53052+ if (real == curuid)
53053+ realok = 1;
53054+ if (effective == curuid)
53055+ effectiveok = 1;
53056+ if (fs == curuid)
53057+ fsok = 1;
53058+ }
53059+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
53060+ for (i = 0; i < num; i++) {
53061+ curuid = (int)uidlist[i];
53062+ if (real == curuid)
53063+ break;
53064+ if (effective == curuid)
53065+ break;
53066+ if (fs == curuid)
53067+ break;
53068+ }
53069+ /* not in deny list */
53070+ if (i == num) {
53071+ realok = 1;
53072+ effectiveok = 1;
53073+ fsok = 1;
53074+ }
53075+ }
53076+
53077+ if (realok && effectiveok && fsok)
53078+ return 0;
53079+ else {
53080+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53081+ return 1;
53082+ }
53083+}
53084+
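gr_check_user_change() above treats user_transitions either as an allow list (every id being switched to must appear) or a deny list (none may appear), with -1 meaning the id is not being changed; 0 permits the transition, 1 blocks it. A reduced sketch of that evaluation for a single id (the real code checks the real, effective and fs ids against the same list):

/* Illustrative only: allow/deny evaluation of uid transitions in the
 * spirit of gr_check_user_change(). Returns 0 to allow, 1 to deny. */
#include <stdio.h>

enum { ID_ALLOW, ID_DENY };

static int check_change(int id, int type, const int *list, int num)
{
	if (id == -1)
		return 0;                     /* this id is not being changed */

	if (type == ID_ALLOW) {
		for (int i = 0; i < num; i++)
			if (list[i] == id)
				return 0;     /* explicitly allowed */
		return 1;
	}

	/* deny list: any hit blocks the transition */
	for (int i = 0; i < num; i++)
		if (list[i] == id)
			return 1;
	return 0;
}

int main(void)
{
	const int allowed[] = { 0, 1000 };

	printf("%d\n", check_change(1000, ID_ALLOW, allowed, 2)); /* 0: allowed */
	printf("%d\n", check_change(33,   ID_ALLOW, allowed, 2)); /* 1: denied */
	printf("%d\n", check_change(-1,   ID_ALLOW, allowed, 2)); /* 0: unchanged */
	return 0;
}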
53085+int
53086+gr_check_group_change(int real, int effective, int fs)
53087+{
53088+ unsigned int i;
53089+ __u16 num;
53090+ gid_t *gidlist;
53091+ int curgid;
53092+ int realok = 0;
53093+ int effectiveok = 0;
53094+ int fsok = 0;
53095+
53096+ if (unlikely(!(gr_status & GR_READY)))
53097+ return 0;
53098+
53099+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53100+ gr_log_learn_id_change('g', real, effective, fs);
53101+
53102+ num = current->acl->group_trans_num;
53103+ gidlist = current->acl->group_transitions;
53104+
53105+ if (gidlist == NULL)
53106+ return 0;
53107+
53108+ if (real == -1)
53109+ realok = 1;
53110+ if (effective == -1)
53111+ effectiveok = 1;
53112+ if (fs == -1)
53113+ fsok = 1;
53114+
53115+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
53116+ for (i = 0; i < num; i++) {
53117+ curgid = (int)gidlist[i];
53118+ if (real == curgid)
53119+ realok = 1;
53120+ if (effective == curgid)
53121+ effectiveok = 1;
53122+ if (fs == curgid)
53123+ fsok = 1;
53124+ }
53125+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
53126+ for (i = 0; i < num; i++) {
53127+ curgid = (int)gidlist[i];
53128+ if (real == curgid)
53129+ break;
53130+ if (effective == curgid)
53131+ break;
53132+ if (fs == curgid)
53133+ break;
53134+ }
53135+ /* not in deny list */
53136+ if (i == num) {
53137+ realok = 1;
53138+ effectiveok = 1;
53139+ fsok = 1;
53140+ }
53141+ }
53142+
53143+ if (realok && effectiveok && fsok)
53144+ return 0;
53145+ else {
53146+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53147+ return 1;
53148+ }
53149+}
53150+
53151+void
53152+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53153+{
53154+ struct acl_role_label *role = task->role;
53155+ struct acl_subject_label *subj = NULL;
53156+ struct acl_object_label *obj;
53157+ struct file *filp;
53158+
53159+ if (unlikely(!(gr_status & GR_READY)))
53160+ return;
53161+
53162+ filp = task->exec_file;
53163+
53164+ /* kernel process, we'll give them the kernel role */
53165+ if (unlikely(!filp)) {
53166+ task->role = kernel_role;
53167+ task->acl = kernel_role->root_label;
53168+ return;
53169+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53170+ role = lookup_acl_role_label(task, uid, gid);
53171+
53172+ /* perform subject lookup in possibly new role
53173+ we can use this result below in the case where role == task->role
53174+ */
53175+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53176+
53177+ /* if we changed uid/gid, but result in the same role
53178+ and are using inheritance, don't lose the inherited subject
53179+ if current subject is other than what normal lookup
53180+ would result in, we arrived via inheritance, don't
53181+ lose subject
53182+ */
53183+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53184+ (subj == task->acl)))
53185+ task->acl = subj;
53186+
53187+ task->role = role;
53188+
53189+ task->is_writable = 0;
53190+
53191+ /* ignore additional mmap checks for processes that are writable
53192+ by the default ACL */
53193+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53194+ if (unlikely(obj->mode & GR_WRITE))
53195+ task->is_writable = 1;
53196+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53197+ if (unlikely(obj->mode & GR_WRITE))
53198+ task->is_writable = 1;
53199+
16454cff 53200+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53201+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53202+#endif
53203+
53204+ gr_set_proc_res(task);
53205+
53206+ return;
53207+}
53208+
53209+int
53210+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53211+ const int unsafe_share)
53212+{
53213+ struct task_struct *task = current;
53214+ struct acl_subject_label *newacl;
53215+ struct acl_object_label *obj;
53216+ __u32 retmode;
53217+
53218+ if (unlikely(!(gr_status & GR_READY)))
53219+ return 0;
53220+
53221+ newacl = chk_subj_label(dentry, mnt, task->role);
53222+
53223+ task_lock(task);
53224+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
53225+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53226+ !(task->role->roletype & GR_ROLE_GOD) &&
53227+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
ae4e228f 53228+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
58c5fc13 53229+ task_unlock(task);
53230+ if (unsafe_share)
53231+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53232+ else
53233+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53234+ return -EACCES;
53235+ }
53236+ task_unlock(task);
53237+
53238+ obj = chk_obj_label(dentry, mnt, task->acl);
53239+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53240+
53241+ if (!(task->acl->mode & GR_INHERITLEARN) &&
53242+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53243+ if (obj->nested)
53244+ task->acl = obj->nested;
53245+ else
53246+ task->acl = newacl;
53247+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53248+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53249+
53250+ task->is_writable = 0;
53251+
53252+ /* ignore additional mmap checks for processes that are writable
53253+ by the default ACL */
53254+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
53255+ if (unlikely(obj->mode & GR_WRITE))
53256+ task->is_writable = 1;
53257+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
53258+ if (unlikely(obj->mode & GR_WRITE))
53259+ task->is_writable = 1;
53260+
53261+ gr_set_proc_res(task);
53262+
16454cff 53263+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53264+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53265+#endif
53266+ return 0;
53267+}
53268+
53269+/* always called with valid inodev ptr */
53270+static void
53271+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53272+{
53273+ struct acl_object_label *matchpo;
53274+ struct acl_subject_label *matchps;
53275+ struct acl_subject_label *subj;
53276+ struct acl_role_label *role;
ae4e228f 53277+ unsigned int x;
58c5fc13 53278+
ae4e228f 53279+ FOR_EACH_ROLE_START(role)
53280+ FOR_EACH_SUBJECT_START(role, subj, x)
53281+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53282+ matchpo->mode |= GR_DELETED;
53283+ FOR_EACH_SUBJECT_END(subj,x)
53284+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53285+ if (subj->inode == ino && subj->device == dev)
53286+ subj->mode |= GR_DELETED;
53287+ FOR_EACH_NESTED_SUBJECT_END(subj)
53288+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53289+ matchps->mode |= GR_DELETED;
ae4e228f 53290+ FOR_EACH_ROLE_END(role)
53291+
53292+ inodev->nentry->deleted = 1;
53293+
53294+ return;
53295+}
53296+
53297+void
53298+gr_handle_delete(const ino_t ino, const dev_t dev)
53299+{
53300+ struct inodev_entry *inodev;
53301+
53302+ if (unlikely(!(gr_status & GR_READY)))
53303+ return;
53304+
53305+ write_lock(&gr_inode_lock);
53306+ inodev = lookup_inodev_entry(ino, dev);
53307+ if (inodev != NULL)
53308+ do_handle_delete(inodev, ino, dev);
53309+ write_unlock(&gr_inode_lock);
53310+
53311+ return;
53312+}
53313+
53314+static void
53315+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53316+ const ino_t newinode, const dev_t newdevice,
53317+ struct acl_subject_label *subj)
53318+{
53319+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53320+ struct acl_object_label *match;
53321+
53322+ match = subj->obj_hash[index];
53323+
53324+ while (match && (match->inode != oldinode ||
53325+ match->device != olddevice ||
53326+ !(match->mode & GR_DELETED)))
53327+ match = match->next;
53328+
53329+ if (match && (match->inode == oldinode)
53330+ && (match->device == olddevice)
53331+ && (match->mode & GR_DELETED)) {
53332+ if (match->prev == NULL) {
53333+ subj->obj_hash[index] = match->next;
53334+ if (match->next != NULL)
53335+ match->next->prev = NULL;
53336+ } else {
53337+ match->prev->next = match->next;
53338+ if (match->next != NULL)
53339+ match->next->prev = match->prev;
53340+ }
53341+ match->prev = NULL;
53342+ match->next = NULL;
53343+ match->inode = newinode;
53344+ match->device = newdevice;
53345+ match->mode &= ~GR_DELETED;
53346+
53347+ insert_acl_obj_label(match, subj);
53348+ }
53349+
53350+ return;
53351+}
53352+
53353+static void
53354+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53355+ const ino_t newinode, const dev_t newdevice,
53356+ struct acl_role_label *role)
53357+{
53358+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53359+ struct acl_subject_label *match;
53360+
53361+ match = role->subj_hash[index];
53362+
53363+ while (match && (match->inode != oldinode ||
53364+ match->device != olddevice ||
53365+ !(match->mode & GR_DELETED)))
53366+ match = match->next;
53367+
53368+ if (match && (match->inode == oldinode)
53369+ && (match->device == olddevice)
53370+ && (match->mode & GR_DELETED)) {
53371+ if (match->prev == NULL) {
53372+ role->subj_hash[index] = match->next;
53373+ if (match->next != NULL)
53374+ match->next->prev = NULL;
53375+ } else {
53376+ match->prev->next = match->next;
53377+ if (match->next != NULL)
53378+ match->next->prev = match->prev;
53379+ }
53380+ match->prev = NULL;
53381+ match->next = NULL;
53382+ match->inode = newinode;
53383+ match->device = newdevice;
53384+ match->mode &= ~GR_DELETED;
53385+
53386+ insert_acl_subj_label(match, role);
53387+ }
53388+
53389+ return;
53390+}
53391+
53392+static void
53393+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53394+ const ino_t newinode, const dev_t newdevice)
53395+{
53396+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53397+ struct inodev_entry *match;
53398+
53399+ match = inodev_set.i_hash[index];
53400+
53401+ while (match && (match->nentry->inode != oldinode ||
53402+ match->nentry->device != olddevice || !match->nentry->deleted))
53403+ match = match->next;
53404+
53405+ if (match && (match->nentry->inode == oldinode)
53406+ && (match->nentry->device == olddevice) &&
53407+ match->nentry->deleted) {
53408+ if (match->prev == NULL) {
53409+ inodev_set.i_hash[index] = match->next;
53410+ if (match->next != NULL)
53411+ match->next->prev = NULL;
53412+ } else {
53413+ match->prev->next = match->next;
53414+ if (match->next != NULL)
53415+ match->next->prev = match->prev;
53416+ }
53417+ match->prev = NULL;
53418+ match->next = NULL;
53419+ match->nentry->inode = newinode;
53420+ match->nentry->device = newdevice;
53421+ match->nentry->deleted = 0;
53422+
53423+ insert_inodev_entry(match);
53424+ }
53425+
53426+ return;
53427+}
53428+
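The three update_* helpers above share one pattern: find the GR_DELETED entry keyed by the old (inode, device), unlink it from its doubly linked hash chain, rewrite its identifiers, clear the deleted flag and re-insert it, so existing policy follows a recreated file. A self-contained sketch of the unlink-and-rehash step with demo node and table types:

/* Illustrative only: unlink a node from a doubly linked hash chain and
 * re-insert it under a new key, as update_acl_obj_label() and friends do. */
#include <stdio.h>

#define BUCKETS 8

struct node {
	unsigned long key;
	struct node *prev, *next;
};

static struct node *table[BUCKETS];

static unsigned int hash(unsigned long key) { return key % BUCKETS; }

static void insert(struct node *n)
{
	unsigned int i = hash(n->key);

	n->prev = NULL;
	n->next = table[i];
	if (table[i])
		table[i]->prev = n;
	table[i] = n;
}

static void rekey(struct node *n, unsigned long newkey)
{
	unsigned int i = hash(n->key);

	/* unlink from the old chain, fixing up the bucket head if needed */
	if (n->prev == NULL)
		table[i] = n->next;
	else
		n->prev->next = n->next;
	if (n->next)
		n->next->prev = n->prev;

	n->prev = n->next = NULL;
	n->key = newkey;
	insert(n);                       /* re-hash under the new identity */
}

int main(void)
{
	struct node a = { .key = 3 }, b = { .key = 11 };

	insert(&a);
	insert(&b);                      /* 11 % 8 == 3, same bucket as a */
	rekey(&a, 20);                   /* a moves to bucket 4 */

	printf("bucket3 head key=%lu, bucket4 head key=%lu\n",
	       table[3]->key, table[hash(20)]->key);
	/* prints: bucket3 head key=11, bucket4 head key=20 */
	return 0;
}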
53429+static void
6e9df6a3 53430+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53431+{
53432+ struct acl_subject_label *subj;
53433+ struct acl_role_label *role;
ae4e228f 53434+ unsigned int x;
6e9df6a3 53435+
ae4e228f 53436+ FOR_EACH_ROLE_START(role)
16454cff 53437+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53438+
53439+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53440+ if ((subj->inode == ino) && (subj->device == dev)) {
53441+ subj->inode = ino;
53442+ subj->device = dev;
53443+ }
53444+ FOR_EACH_NESTED_SUBJECT_END(subj)
53445+ FOR_EACH_SUBJECT_START(role, subj, x)
53446+ update_acl_obj_label(matchn->inode, matchn->device,
16454cff 53447+ ino, dev, subj);
58c5fc13 53448+ FOR_EACH_SUBJECT_END(subj,x)
ae4e228f 53449+ FOR_EACH_ROLE_END(role)
58c5fc13 53450+
16454cff 53451+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53452+
53453+ return;
53454+}
53455+
53456+static void
53457+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53458+ const struct vfsmount *mnt)
53459+{
53460+ ino_t ino = dentry->d_inode->i_ino;
53461+ dev_t dev = __get_dev(dentry);
53462+
53463+ __do_handle_create(matchn, ino, dev);
53464+
53465+ return;
53466+}
53467+
53468+void
53469+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53470+{
53471+ struct name_entry *matchn;
53472+
53473+ if (unlikely(!(gr_status & GR_READY)))
53474+ return;
53475+
53476+ preempt_disable();
53477+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53478+
53479+ if (unlikely((unsigned long)matchn)) {
53480+ write_lock(&gr_inode_lock);
53481+ do_handle_create(matchn, dentry, mnt);
53482+ write_unlock(&gr_inode_lock);
53483+ }
53484+ preempt_enable();
53485+
53486+ return;
53487+}
53488+
53489+void
53490+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53491+{
53492+ struct name_entry *matchn;
53493+
53494+ if (unlikely(!(gr_status & GR_READY)))
53495+ return;
53496+
53497+ preempt_disable();
53498+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53499+
53500+ if (unlikely((unsigned long)matchn)) {
53501+ write_lock(&gr_inode_lock);
53502+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53503+ write_unlock(&gr_inode_lock);
53504+ }
53505+ preempt_enable();
53506+
53507+ return;
53508+}
53509+
53510+void
53511+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53512+ struct dentry *old_dentry,
53513+ struct dentry *new_dentry,
53514+ struct vfsmount *mnt, const __u8 replace)
53515+{
53516+ struct name_entry *matchn;
53517+ struct inodev_entry *inodev;
6e9df6a3 53518+ struct inode *inode = new_dentry->d_inode;
53519+ ino_t old_ino = old_dentry->d_inode->i_ino;
53520+ dev_t old_dev = __get_dev(old_dentry);
53521+
53522+ /* vfs_rename swaps the name and parent link for old_dentry and
53523+ new_dentry
53524+ at this point, old_dentry has the new name, parent link, and inode
53525+ for the renamed file
53526+ if a file is being replaced by a rename, new_dentry has the inode
53527+ and name for the replaced file
53528+ */
53529+
53530+ if (unlikely(!(gr_status & GR_READY)))
53531+ return;
53532+
53533+ preempt_disable();
53534+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53535+
53536+ /* we wouldn't have to check d_inode if it weren't for
53537+ NFS silly-renaming
53538+ */
53539+
53540+ write_lock(&gr_inode_lock);
53541+ if (unlikely(replace && inode)) {
53542+ ino_t new_ino = inode->i_ino;
53543+ dev_t new_dev = __get_dev(new_dentry);
53544+
53545+ inodev = lookup_inodev_entry(new_ino, new_dev);
6e9df6a3 53546+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
16454cff 53547+ do_handle_delete(inodev, new_ino, new_dev);
53548+ }
53549+
16454cff 53550+ inodev = lookup_inodev_entry(old_ino, old_dev);
6e9df6a3 53551+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
16454cff 53552+ do_handle_delete(inodev, old_ino, old_dev);
53553+
53554+ if (unlikely((unsigned long)matchn))
53555+ do_handle_create(matchn, old_dentry, mnt);
53556+
53557+ write_unlock(&gr_inode_lock);
53558+ preempt_enable();
53559+
53560+ return;
53561+}
53562+
53563+static int
53564+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53565+ unsigned char **sum)
53566+{
53567+ struct acl_role_label *r;
53568+ struct role_allowed_ip *ipp;
53569+ struct role_transition *trans;
53570+ unsigned int i;
53571+ int found = 0;
53572+ u32 curr_ip = current->signal->curr_ip;
53573+
53574+ current->signal->saved_ip = curr_ip;
53575+
53576+ /* check transition table */
53577+
53578+ for (trans = current->role->transitions; trans; trans = trans->next) {
53579+ if (!strcmp(rolename, trans->rolename)) {
53580+ found = 1;
53581+ break;
53582+ }
53583+ }
53584+
53585+ if (!found)
53586+ return 0;
53587+
53588+ /* handle special roles that do not require authentication
53589+ and check ip */
53590+
ae4e228f 53591+ FOR_EACH_ROLE_START(r)
53592+ if (!strcmp(rolename, r->rolename) &&
53593+ (r->roletype & GR_ROLE_SPECIAL)) {
53594+ found = 0;
53595+ if (r->allowed_ips != NULL) {
53596+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
bc901d79 53597+ if ((ntohl(curr_ip) & ipp->netmask) ==
53598+ (ntohl(ipp->addr) & ipp->netmask))
53599+ found = 1;
53600+ }
53601+ } else
53602+ found = 2;
53603+ if (!found)
53604+ return 0;
53605+
53606+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53607+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53608+ *salt = NULL;
53609+ *sum = NULL;
53610+ return 1;
53611+ }
53612+ }
ae4e228f 53613+ FOR_EACH_ROLE_END(r)
53614+
53615+ for (i = 0; i < num_sprole_pws; i++) {
53616+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53617+ *salt = acl_special_roles[i]->salt;
53618+ *sum = acl_special_roles[i]->sum;
53619+ return 1;
53620+ }
53621+ }
53622+
53623+ return 0;
53624+}
53625+
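The allowed_ips loop above admits a special role only when the requesting address falls inside one of the role's configured networks, by masking both sides with the netmask before comparing. A tiny userspace sketch of the same subnet test using inet_addr()/ntohl():

/* Illustrative only: the netmask comparison used for allowed_ips, done in
 * userspace with inet_addr()/ntohl(). Build: cc -o ipdemo ipdemo.c */
#include <arpa/inet.h>
#include <stdio.h>

static int ip_allowed(const char *client, const char *net, const char *mask)
{
	unsigned long c = ntohl(inet_addr(client));
	unsigned long n = ntohl(inet_addr(net));
	unsigned long m = ntohl(inet_addr(mask));

	/* both sides masked with the same netmask, as in the loop above */
	return (c & m) == (n & m);
}

int main(void)
{
	printf("%d\n", ip_allowed("192.168.1.42", "192.168.1.0", "255.255.255.0")); /* 1 */
	printf("%d\n", ip_allowed("10.0.0.5",     "192.168.1.0", "255.255.255.0")); /* 0 */
	return 0;
}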
53626+static void
53627+assign_special_role(char *rolename)
53628+{
53629+ struct acl_object_label *obj;
53630+ struct acl_role_label *r;
53631+ struct acl_role_label *assigned = NULL;
53632+ struct task_struct *tsk;
53633+ struct file *filp;
58c5fc13 53634+
ae4e228f 53635+ FOR_EACH_ROLE_START(r)
58c5fc13 53636+ if (!strcmp(rolename, r->rolename) &&
ae4e228f 53637+ (r->roletype & GR_ROLE_SPECIAL)) {
58c5fc13 53638+ assigned = r;
53639+ break;
53640+ }
53641+ FOR_EACH_ROLE_END(r)
53642+
53643+ if (!assigned)
53644+ return;
53645+
53646+ read_lock(&tasklist_lock);
53647+ read_lock(&grsec_exec_file_lock);
53648+
6892158b 53649+ tsk = current->real_parent;
53650+ if (tsk == NULL)
53651+ goto out_unlock;
53652+
53653+ filp = tsk->exec_file;
53654+ if (filp == NULL)
53655+ goto out_unlock;
53656+
53657+ tsk->is_writable = 0;
53658+
53659+ tsk->acl_sp_role = 1;
53660+ tsk->acl_role_id = ++acl_sp_role_value;
53661+ tsk->role = assigned;
53662+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53663+
53664+ /* ignore additional mmap checks for processes that are writable
53665+ by the default ACL */
53666+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53667+ if (unlikely(obj->mode & GR_WRITE))
53668+ tsk->is_writable = 1;
53669+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53670+ if (unlikely(obj->mode & GR_WRITE))
53671+ tsk->is_writable = 1;
53672+
16454cff 53673+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53674+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53675+#endif
53676+
53677+out_unlock:
53678+ read_unlock(&grsec_exec_file_lock);
53679+ read_unlock(&tasklist_lock);
53680+ return;
53681+}
53682+
53683+int gr_check_secure_terminal(struct task_struct *task)
53684+{
53685+ struct task_struct *p, *p2, *p3;
53686+ struct files_struct *files;
53687+ struct fdtable *fdt;
53688+ struct file *our_file = NULL, *file;
53689+ int i;
53690+
53691+ if (task->signal->tty == NULL)
53692+ return 1;
53693+
53694+ files = get_files_struct(task);
53695+ if (files != NULL) {
53696+ rcu_read_lock();
53697+ fdt = files_fdtable(files);
53698+ for (i=0; i < fdt->max_fds; i++) {
53699+ file = fcheck_files(files, i);
53700+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53701+ get_file(file);
53702+ our_file = file;
53703+ }
53704+ }
53705+ rcu_read_unlock();
53706+ put_files_struct(files);
53707+ }
53708+
53709+ if (our_file == NULL)
53710+ return 1;
53711+
53712+ read_lock(&tasklist_lock);
53713+ do_each_thread(p2, p) {
53714+ files = get_files_struct(p);
53715+ if (files == NULL ||
53716+ (p->signal && p->signal->tty == task->signal->tty)) {
53717+ if (files != NULL)
53718+ put_files_struct(files);
53719+ continue;
53720+ }
53721+ rcu_read_lock();
53722+ fdt = files_fdtable(files);
53723+ for (i=0; i < fdt->max_fds; i++) {
53724+ file = fcheck_files(files, i);
53725+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53726+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53727+ p3 = task;
53728+ while (p3->pid > 0) {
53729+ if (p3 == p)
53730+ break;
6892158b 53731+ p3 = p3->real_parent;
53732+ }
53733+ if (p3 == p)
53734+ break;
53735+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53736+ gr_handle_alertkill(p);
53737+ rcu_read_unlock();
53738+ put_files_struct(files);
53739+ read_unlock(&tasklist_lock);
53740+ fput(our_file);
53741+ return 0;
53742+ }
53743+ }
53744+ rcu_read_unlock();
53745+ put_files_struct(files);
53746+ } while_each_thread(p2, p);
53747+ read_unlock(&tasklist_lock);
53748+
53749+ fput(our_file);
53750+ return 1;
53751+}
53752+
53753+ssize_t
53754+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53755+{
53756+ struct gr_arg_wrapper uwrap;
53757+ unsigned char *sprole_salt = NULL;
53758+ unsigned char *sprole_sum = NULL;
53759+ int error = sizeof (struct gr_arg_wrapper);
53760+ int error2 = 0;
53761+
bc901d79 53762+ mutex_lock(&gr_dev_mutex);
53763+
53764+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53765+ error = -EPERM;
53766+ goto out;
53767+ }
53768+
53769+ if (count != sizeof (struct gr_arg_wrapper)) {
53770+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53771+ error = -EINVAL;
53772+ goto out;
53773+ }
53774+
53775+
53776+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53777+ gr_auth_expires = 0;
53778+ gr_auth_attempts = 0;
53779+ }
53780+
53781+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53782+ error = -EFAULT;
53783+ goto out;
53784+ }
53785+
53786+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53787+ error = -EINVAL;
53788+ goto out;
53789+ }
53790+
53791+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53792+ error = -EFAULT;
53793+ goto out;
53794+ }
53795+
53796+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53797+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53798+ time_after(gr_auth_expires, get_seconds())) {
53799+ error = -EBUSY;
53800+ goto out;
53801+ }
53802+
53803+ /* if non-root trying to do anything other than use a special role,
53804+ do not attempt authentication, do not count towards authentication
53805+ locking
53806+ */
53807+
53808+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53809+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53810+ current_uid()) {
53811+ error = -EPERM;
53812+ goto out;
53813+ }
53814+
53815+ /* ensure pw and special role name are null terminated */
53816+
53817+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53818+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53819+
53820+ /* Okay.
53821+ * We have enough of the argument structure (we have yet
53822+ * to copy_from_user the tables themselves). Copy the tables
53823+ * only if we need them, i.e. for loading operations. */
53824+
53825+ switch (gr_usermode->mode) {
53826+ case GR_STATUS:
53827+ if (gr_status & GR_READY) {
53828+ error = 1;
53829+ if (!gr_check_secure_terminal(current))
53830+ error = 3;
53831+ } else
53832+ error = 2;
53833+ goto out;
53834+ case GR_SHUTDOWN:
53835+ if ((gr_status & GR_READY)
53836+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
ae4e228f 53837+ pax_open_kernel();
58c5fc13 53838+ gr_status &= ~GR_READY;
53839+ pax_close_kernel();
53840+
53841+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53842+ free_variables();
53843+ memset(gr_usermode, 0, sizeof (struct gr_arg));
53844+ memset(gr_system_salt, 0, GR_SALT_LEN);
53845+ memset(gr_system_sum, 0, GR_SHA_LEN);
53846+ } else if (gr_status & GR_READY) {
53847+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53848+ error = -EPERM;
53849+ } else {
53850+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53851+ error = -EAGAIN;
53852+ }
53853+ break;
53854+ case GR_ENABLE:
53855+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53856+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53857+ else {
53858+ if (gr_status & GR_READY)
53859+ error = -EAGAIN;
53860+ else
53861+ error = error2;
53862+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53863+ }
53864+ break;
53865+ case GR_RELOAD:
53866+ if (!(gr_status & GR_READY)) {
53867+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53868+ error = -EAGAIN;
53869+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
c52201e0 53870+ preempt_disable();
58c5fc13 53871+
ae4e228f 53872+ pax_open_kernel();
58c5fc13 53873+ gr_status &= ~GR_READY;
53874+ pax_close_kernel();
53875+
53876+ free_variables();
53877+ if (!(error2 = gracl_init(gr_usermode))) {
c52201e0 53878+ preempt_enable();
53879+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53880+ } else {
c52201e0 53881+ preempt_enable();
53882+ error = error2;
53883+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53884+ }
53885+ } else {
53886+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53887+ error = -EPERM;
53888+ }
53889+ break;
53890+ case GR_SEGVMOD:
53891+ if (unlikely(!(gr_status & GR_READY))) {
53892+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53893+ error = -EAGAIN;
53894+ break;
53895+ }
53896+
53897+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53898+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53899+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53900+ struct acl_subject_label *segvacl;
53901+ segvacl =
53902+ lookup_acl_subj_label(gr_usermode->segv_inode,
53903+ gr_usermode->segv_device,
53904+ current->role);
53905+ if (segvacl) {
53906+ segvacl->crashes = 0;
53907+ segvacl->expires = 0;
53908+ }
53909+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53910+ gr_remove_uid(gr_usermode->segv_uid);
53911+ }
53912+ } else {
53913+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53914+ error = -EPERM;
53915+ }
53916+ break;
53917+ case GR_SPROLE:
53918+ case GR_SPROLEPAM:
53919+ if (unlikely(!(gr_status & GR_READY))) {
53920+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53921+ error = -EAGAIN;
53922+ break;
53923+ }
53924+
53925+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53926+ current->role->expires = 0;
53927+ current->role->auth_attempts = 0;
53928+ }
53929+
53930+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53931+ time_after(current->role->expires, get_seconds())) {
53932+ error = -EBUSY;
53933+ goto out;
53934+ }
53935+
53936+ if (lookup_special_role_auth
53937+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53938+ && ((!sprole_salt && !sprole_sum)
53939+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53940+ char *p = "";
53941+ assign_special_role(gr_usermode->sp_role);
53942+ read_lock(&tasklist_lock);
53943+ if (current->real_parent)
53944+ p = current->real_parent->role->rolename;
53945+ read_unlock(&tasklist_lock);
53946+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53947+ p, acl_sp_role_value);
53948+ } else {
53949+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53950+ error = -EPERM;
53951+ if(!(current->role->auth_attempts++))
53952+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53953+
53954+ goto out;
53955+ }
53956+ break;
53957+ case GR_UNSPROLE:
53958+ if (unlikely(!(gr_status & GR_READY))) {
53959+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53960+ error = -EAGAIN;
53961+ break;
53962+ }
53963+
53964+ if (current->role->roletype & GR_ROLE_SPECIAL) {
53965+ char *p = "";
53966+ int i = 0;
53967+
53968+ read_lock(&tasklist_lock);
53969+ if (current->real_parent) {
53970+ p = current->real_parent->role->rolename;
53971+ i = current->real_parent->acl_role_id;
53972+ }
53973+ read_unlock(&tasklist_lock);
53974+
53975+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53976+ gr_set_acls(1);
53977+ } else {
53978+ error = -EPERM;
53979+ goto out;
53980+ }
53981+ break;
53982+ default:
53983+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53984+ error = -EINVAL;
53985+ break;
53986+ }
53987+
53988+ if (error != -EPERM)
53989+ goto out;
53990+
53991+ if(!(gr_auth_attempts++))
53992+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53993+
53994+ out:
bc901d79 53995+ mutex_unlock(&gr_dev_mutex);
53996+ return error;
53997+}
53998+
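write_grsec_handler() above throttles failed authentication: the first failure arms gr_auth_expires, and once gr_auth_attempts reaches CONFIG_GRKERNSEC_ACL_MAXTRIES further requests get -EBUSY until the window runs out (special roles keep per-role counters the same way). A minimal sketch of that lockout pattern, with time() and made-up constants standing in for get_seconds() and the Kconfig values:

/* Illustrative only: the failed-attempt lockout pattern from
 * write_grsec_handler(), with time() standing in for get_seconds(). */
#include <stdio.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT  30           /* seconds, stand-in for the Kconfig value */

static unsigned int attempts;
static time_t expires;

static int try_auth(int password_ok)
{
	time_t now = time(NULL);

	/* window elapsed: forget previous failures */
	if (expires && now >= expires) {
		expires = 0;
		attempts = 0;
	}

	if (attempts >= MAXTRIES && now < expires)
		return -1;                    /* locked out (-EBUSY in the patch) */

	if (password_ok)
		return 0;

	/* arm the window on the first failure, count every failure */
	if (!attempts++)
		expires = now + TIMEOUT;
	return -2;                            /* auth failed (-EPERM) */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("attempt %d -> %d\n", i, try_auth(0));
	return 0;
}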
53999+/* must be called with
54000+ rcu_read_lock();
54001+ read_lock(&tasklist_lock);
54002+ read_lock(&grsec_exec_file_lock);
54003+*/
54004+int gr_apply_subject_to_task(struct task_struct *task)
54005+{
54006+ struct acl_object_label *obj;
54007+ char *tmpname;
54008+ struct acl_subject_label *tmpsubj;
54009+ struct file *filp;
54010+ struct name_entry *nmatch;
54011+
54012+ filp = task->exec_file;
54013+ if (filp == NULL)
54014+ return 0;
54015+
54016+ /* the following is to apply the correct subject
54017+ on binaries running when the RBAC system
54018+ is enabled, when the binaries have been
54019+ replaced or deleted since their execution
54020+ -----
54021+ when the RBAC system starts, the inode/dev
54022+ from exec_file will be one the RBAC system
54023+ is unaware of. It only knows the inode/dev
54024+ of the present file on disk, or the absence
54025+ of it.
54026+ */
54027+ preempt_disable();
54028+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54029+
54030+ nmatch = lookup_name_entry(tmpname);
54031+ preempt_enable();
54032+ tmpsubj = NULL;
54033+ if (nmatch) {
54034+ if (nmatch->deleted)
54035+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54036+ else
54037+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54038+ if (tmpsubj != NULL)
54039+ task->acl = tmpsubj;
54040+ }
54041+ if (tmpsubj == NULL)
54042+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54043+ task->role);
54044+ if (task->acl) {
54045+ task->is_writable = 0;
54046+ /* ignore additional mmap checks for processes that are writable
54047+ by the default ACL */
54048+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54049+ if (unlikely(obj->mode & GR_WRITE))
54050+ task->is_writable = 1;
54051+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54052+ if (unlikely(obj->mode & GR_WRITE))
54053+ task->is_writable = 1;
54054+
54055+ gr_set_proc_res(task);
54056+
54057+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54058+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54059+#endif
54060+ } else {
54061+ return 1;
54062+ }
54063+
54064+ return 0;
54065+}
54066+
54067+int
54068+gr_set_acls(const int type)
54069+{
58c5fc13 54070+ struct task_struct *task, *task2;
54071+ struct acl_role_label *role = current->role;
54072+ __u16 acl_role_id = current->acl_role_id;
54073+ const struct cred *cred;
16454cff 54074+ int ret;
58c5fc13 54075+
ae4e228f 54076+ rcu_read_lock();
54077+ read_lock(&tasklist_lock);
54078+ read_lock(&grsec_exec_file_lock);
54079+ do_each_thread(task2, task) {
54080+ /* check to see if we're called from the exit handler,
54081+ if so, only replace ACLs that have inherited the admin
54082+ ACL */
54083+
54084+ if (type && (task->role != role ||
54085+ task->acl_role_id != acl_role_id))
54086+ continue;
54087+
54088+ task->acl_role_id = 0;
54089+ task->acl_sp_role = 0;
54090+
16454cff 54091+ if (task->exec_file) {
54092+ cred = __task_cred(task);
54093+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54094+ ret = gr_apply_subject_to_task(task);
54095+ if (ret) {
54096+ read_unlock(&grsec_exec_file_lock);
54097+ read_unlock(&tasklist_lock);
ae4e228f 54098+ rcu_read_unlock();
58c5fc13 54099+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
16454cff 54100+ return ret;
54101+ }
54102+ } else {
54103+ // it's a kernel process
54104+ task->role = kernel_role;
54105+ task->acl = kernel_role->root_label;
54106+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54107+ task->acl->mode &= ~GR_PROCFIND;
54108+#endif
54109+ }
54110+ } while_each_thread(task2, task);
54111+ read_unlock(&grsec_exec_file_lock);
54112+ read_unlock(&tasklist_lock);
54113+ rcu_read_unlock();
54114+
54115+ return 0;
54116+}
54117+
54118+void
54119+gr_learn_resource(const struct task_struct *task,
54120+ const int res, const unsigned long wanted, const int gt)
54121+{
54122+ struct acl_subject_label *acl;
54123+ const struct cred *cred;
54124+
54125+ if (unlikely((gr_status & GR_READY) &&
54126+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54127+ goto skip_reslog;
54128+
54129+#ifdef CONFIG_GRKERNSEC_RESLOG
54130+ gr_log_resource(task, res, wanted, gt);
54131+#endif
54132+ skip_reslog:
54133+
54134+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54135+ return;
54136+
54137+ acl = task->acl;
54138+
54139+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54140+ !(acl->resmask & (1 << (unsigned short) res))))
54141+ return;
54142+
54143+ if (wanted >= acl->res[res].rlim_cur) {
54144+ unsigned long res_add;
54145+
54146+ res_add = wanted;
54147+ switch (res) {
54148+ case RLIMIT_CPU:
54149+ res_add += GR_RLIM_CPU_BUMP;
54150+ break;
54151+ case RLIMIT_FSIZE:
54152+ res_add += GR_RLIM_FSIZE_BUMP;
54153+ break;
54154+ case RLIMIT_DATA:
54155+ res_add += GR_RLIM_DATA_BUMP;
54156+ break;
54157+ case RLIMIT_STACK:
54158+ res_add += GR_RLIM_STACK_BUMP;
54159+ break;
54160+ case RLIMIT_CORE:
54161+ res_add += GR_RLIM_CORE_BUMP;
54162+ break;
54163+ case RLIMIT_RSS:
54164+ res_add += GR_RLIM_RSS_BUMP;
54165+ break;
54166+ case RLIMIT_NPROC:
54167+ res_add += GR_RLIM_NPROC_BUMP;
54168+ break;
54169+ case RLIMIT_NOFILE:
54170+ res_add += GR_RLIM_NOFILE_BUMP;
54171+ break;
54172+ case RLIMIT_MEMLOCK:
54173+ res_add += GR_RLIM_MEMLOCK_BUMP;
54174+ break;
54175+ case RLIMIT_AS:
54176+ res_add += GR_RLIM_AS_BUMP;
54177+ break;
54178+ case RLIMIT_LOCKS:
54179+ res_add += GR_RLIM_LOCKS_BUMP;
54180+ break;
54181+ case RLIMIT_SIGPENDING:
54182+ res_add += GR_RLIM_SIGPENDING_BUMP;
54183+ break;
54184+ case RLIMIT_MSGQUEUE:
54185+ res_add += GR_RLIM_MSGQUEUE_BUMP;
54186+ break;
54187+ case RLIMIT_NICE:
54188+ res_add += GR_RLIM_NICE_BUMP;
54189+ break;
54190+ case RLIMIT_RTPRIO:
54191+ res_add += GR_RLIM_RTPRIO_BUMP;
54192+ break;
54193+ case RLIMIT_RTTIME:
54194+ res_add += GR_RLIM_RTTIME_BUMP;
54195+ break;
54196+ }
54197+
54198+ acl->res[res].rlim_cur = res_add;
54199+
54200+ if (wanted > acl->res[res].rlim_max)
54201+ acl->res[res].rlim_max = res_add;
54202+
54203+ /* only log the subject filename, since resource logging is supported for
54204+ single-subject learning only */
ae4e228f 54205+ rcu_read_lock();
54206+ cred = __task_cred(task);
54207+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54208+ task->role->roletype, cred->uid, cred->gid, acl->filename,
54209+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
bc901d79 54210+ "", (unsigned long) res, &task->signal->saved_ip);
ae4e228f 54211+ rcu_read_unlock();
54212+ }
54213+
54214+ return;
54215+}
54216+
54217+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54218+void
54219+pax_set_initial_flags(struct linux_binprm *bprm)
54220+{
54221+ struct task_struct *task = current;
54222+ struct acl_subject_label *proc;
54223+ unsigned long flags;
54224+
54225+ if (unlikely(!(gr_status & GR_READY)))
54226+ return;
54227+
54228+ flags = pax_get_flags(task);
54229+
54230+ proc = task->acl;
54231+
54232+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54233+ flags &= ~MF_PAX_PAGEEXEC;
54234+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54235+ flags &= ~MF_PAX_SEGMEXEC;
54236+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54237+ flags &= ~MF_PAX_RANDMMAP;
54238+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54239+ flags &= ~MF_PAX_EMUTRAMP;
54240+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54241+ flags &= ~MF_PAX_MPROTECT;
54242+
54243+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54244+ flags |= MF_PAX_PAGEEXEC;
54245+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54246+ flags |= MF_PAX_SEGMEXEC;
54247+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54248+ flags |= MF_PAX_RANDMMAP;
54249+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54250+ flags |= MF_PAX_EMUTRAMP;
54251+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54252+ flags |= MF_PAX_MPROTECT;
54253+
54254+ pax_set_flags(task, flags);
54255+
54256+ return;
54257+}
54258+#endif
54259+
54260+#ifdef CONFIG_SYSCTL
54261+/* Eric Biederman likes breaking userland ABI and every inode-based security
54262+ system to save 35kb of memory */
54263+
54264+/* we modify the passed in filename, but adjust it back before returning */
54265+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
54266+{
54267+ struct name_entry *nmatch;
54268+ char *p, *lastp = NULL;
54269+ struct acl_object_label *obj = NULL, *tmp;
54270+ struct acl_subject_label *tmpsubj;
54271+ char c = '\0';
54272+
54273+ read_lock(&gr_inode_lock);
54274+
54275+ p = name + len - 1;
54276+ do {
54277+ nmatch = lookup_name_entry(name);
54278+ if (lastp != NULL)
54279+ *lastp = c;
54280+
54281+ if (nmatch == NULL)
54282+ goto next_component;
54283+ tmpsubj = current->acl;
54284+ do {
54285+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
54286+ if (obj != NULL) {
54287+ tmp = obj->globbed;
54288+ while (tmp) {
54289+ if (!glob_match(tmp->filename, name)) {
54290+ obj = tmp;
54291+ goto found_obj;
54292+ }
54293+ tmp = tmp->next;
54294+ }
54295+ goto found_obj;
54296+ }
54297+ } while ((tmpsubj = tmpsubj->parent_subject));
54298+next_component:
54299+ /* end case */
54300+ if (p == name)
54301+ break;
54302+
54303+ while (*p != '/')
54304+ p--;
54305+ if (p == name)
54306+ lastp = p + 1;
54307+ else {
54308+ lastp = p;
54309+ p--;
54310+ }
54311+ c = *lastp;
54312+ *lastp = '\0';
54313+ } while (1);
54314+found_obj:
54315+ read_unlock(&gr_inode_lock);
54316+ /* obj returned will always be non-null */
54317+ return obj;
54318+}
54319+
54320+/* returns 0 when allowing, non-zero on error
54321+ op of 0 is used for readdir, so we don't log the names of hidden files
54322+*/
54323+__u32
54324+gr_handle_sysctl(const struct ctl_table *table, const int op)
54325+{
57199397 54326+ struct ctl_table *tmp;
58c5fc13
MT
54327+ const char *proc_sys = "/proc/sys";
54328+ char *path;
54329+ struct acl_object_label *obj;
54330+ unsigned short len = 0, pos = 0, depth = 0, i;
54331+ __u32 err = 0;
54332+ __u32 mode = 0;
54333+
54334+ if (unlikely(!(gr_status & GR_READY)))
54335+ return 0;
54336+
54337+ /* for now, ignore operations on non-sysctl entries if it's not a
54338+ readdir*/
54339+ if (table->child != NULL && op != 0)
54340+ return 0;
54341+
54342+ mode |= GR_FIND;
54343+ /* it's only a read if it's an entry, read on dirs is for readdir */
54344+ if (op & MAY_READ)
54345+ mode |= GR_READ;
54346+ if (op & MAY_WRITE)
54347+ mode |= GR_WRITE;
54348+
54349+ preempt_disable();
54350+
54351+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54352+
54353+ /* it's only a read/write if it's an actual entry, not a dir
54354+ (which are opened for readdir)
54355+ */
54356+
54357+ /* convert the requested sysctl entry into a pathname */
54358+
57199397 54359+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
58c5fc13
MT
54360+ len += strlen(tmp->procname);
54361+ len++;
54362+ depth++;
54363+ }
54364+
54365+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
54366+ /* deny */
54367+ goto out;
54368+ }
54369+
54370+ memset(path, 0, PAGE_SIZE);
54371+
54372+ memcpy(path, proc_sys, strlen(proc_sys));
54373+
54374+ pos += strlen(proc_sys);
54375+
54376+ for (; depth > 0; depth--) {
54377+ path[pos] = '/';
54378+ pos++;
57199397 54379+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
58c5fc13
MT
54380+ if (depth == i) {
54381+ memcpy(path + pos, tmp->procname,
54382+ strlen(tmp->procname));
54383+ pos += strlen(tmp->procname);
54384+ }
54385+ i++;
54386+ }
54387+ }
54388+
54389+ obj = gr_lookup_by_name(path, pos);
54390+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
54391+
54392+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
54393+ ((err & mode) != mode))) {
54394+ __u32 new_mode = mode;
54395+
54396+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
54397+
54398+ err = 0;
54399+ gr_log_learn_sysctl(path, new_mode);
54400+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
54401+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
54402+ err = -ENOENT;
54403+ } else if (!(err & GR_FIND)) {
54404+ err = -ENOENT;
54405+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
54406+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
54407+ path, (mode & GR_READ) ? " reading" : "",
54408+ (mode & GR_WRITE) ? " writing" : "");
54409+ err = -EACCES;
54410+ } else if ((err & mode) != mode) {
54411+ err = -EACCES;
54412+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
54413+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
54414+ path, (mode & GR_READ) ? " reading" : "",
54415+ (mode & GR_WRITE) ? " writing" : "");
54416+ err = 0;
54417+ } else
54418+ err = 0;
54419+
54420+ out:
54421+ preempt_enable();
54422+
54423+ return err;
54424+}
54425+#endif
54426+
54427+int
54428+gr_handle_proc_ptrace(struct task_struct *task)
54429+{
54430+ struct file *filp;
54431+ struct task_struct *tmp = task;
54432+ struct task_struct *curtemp = current;
54433+ __u32 retmode;
54434+
54435+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54436+ if (unlikely(!(gr_status & GR_READY)))
54437+ return 0;
54438+#endif
54439+
54440+ read_lock(&tasklist_lock);
54441+ read_lock(&grsec_exec_file_lock);
54442+ filp = task->exec_file;
54443+
54444+ while (tmp->pid > 0) {
54445+ if (tmp == curtemp)
54446+ break;
6892158b 54447+ tmp = tmp->real_parent;
58c5fc13
MT
54448+ }
54449+
54450+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54451+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54452+ read_unlock(&grsec_exec_file_lock);
54453+ read_unlock(&tasklist_lock);
54454+ return 1;
54455+ }
54456+
54457+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54458+ if (!(gr_status & GR_READY)) {
54459+ read_unlock(&grsec_exec_file_lock);
54460+ read_unlock(&tasklist_lock);
54461+ return 0;
54462+ }
54463+#endif
54464+
54465+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54466+ read_unlock(&grsec_exec_file_lock);
54467+ read_unlock(&tasklist_lock);
54468+
54469+ if (retmode & GR_NOPTRACE)
54470+ return 1;
54471+
54472+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54473+ && (current->acl != task->acl || (current->acl != current->role->root_label
54474+ && current->pid != task->pid)))
54475+ return 1;
54476+
54477+ return 0;
54478+}
54479+
6892158b
MT
54480+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54481+{
54482+ if (unlikely(!(gr_status & GR_READY)))
54483+ return;
54484+
54485+ if (!(current->role->roletype & GR_ROLE_GOD))
54486+ return;
54487+
54488+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54489+ p->role->rolename, gr_task_roletype_to_char(p),
54490+ p->acl->filename);
54491+}
54492+
58c5fc13
MT
54493+int
54494+gr_handle_ptrace(struct task_struct *task, const long request)
54495+{
54496+ struct task_struct *tmp = task;
54497+ struct task_struct *curtemp = current;
54498+ __u32 retmode;
54499+
54500+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54501+ if (unlikely(!(gr_status & GR_READY)))
54502+ return 0;
54503+#endif
54504+
54505+ read_lock(&tasklist_lock);
54506+ while (tmp->pid > 0) {
54507+ if (tmp == curtemp)
54508+ break;
6892158b 54509+ tmp = tmp->real_parent;
58c5fc13
MT
54510+ }
54511+
54512+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54513+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54514+ read_unlock(&tasklist_lock);
54515+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54516+ return 1;
54517+ }
54518+ read_unlock(&tasklist_lock);
54519+
54520+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54521+ if (!(gr_status & GR_READY))
54522+ return 0;
54523+#endif
54524+
54525+ read_lock(&grsec_exec_file_lock);
54526+ if (unlikely(!task->exec_file)) {
54527+ read_unlock(&grsec_exec_file_lock);
54528+ return 0;
54529+ }
54530+
54531+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54532+ read_unlock(&grsec_exec_file_lock);
54533+
54534+ if (retmode & GR_NOPTRACE) {
54535+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54536+ return 1;
54537+ }
54538+
54539+ if (retmode & GR_PTRACERD) {
54540+ switch (request) {
6e9df6a3 54541+ case PTRACE_SEIZE:
58c5fc13
MT
54542+ case PTRACE_POKETEXT:
54543+ case PTRACE_POKEDATA:
54544+ case PTRACE_POKEUSR:
54545+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54546+ case PTRACE_SETREGS:
54547+ case PTRACE_SETFPREGS:
54548+#endif
54549+#ifdef CONFIG_X86
54550+ case PTRACE_SETFPXREGS:
54551+#endif
54552+#ifdef CONFIG_ALTIVEC
54553+ case PTRACE_SETVRREGS:
54554+#endif
54555+ return 1;
54556+ default:
54557+ return 0;
54558+ }
54559+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
54560+ !(current->role->roletype & GR_ROLE_GOD) &&
54561+ (current->acl != task->acl)) {
54562+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54563+ return 1;
54564+ }
54565+
54566+ return 0;
54567+}
54568+
54569+static int is_writable_mmap(const struct file *filp)
54570+{
54571+ struct task_struct *task = current;
54572+ struct acl_object_label *obj, *obj2;
54573+
54574+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71d190be 54575+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
58c5fc13
MT
54576+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54577+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54578+ task->role->root_label);
54579+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54580+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54581+ return 1;
54582+ }
54583+ }
54584+ return 0;
54585+}
54586+
54587+int
54588+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54589+{
54590+ __u32 mode;
54591+
54592+ if (unlikely(!file || !(prot & PROT_EXEC)))
54593+ return 1;
54594+
54595+ if (is_writable_mmap(file))
54596+ return 0;
54597+
54598+ mode =
54599+ gr_search_file(file->f_path.dentry,
54600+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54601+ file->f_path.mnt);
54602+
54603+ if (!gr_tpe_allow(file))
54604+ return 0;
54605+
54606+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54607+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54608+ return 0;
54609+ } else if (unlikely(!(mode & GR_EXEC))) {
54610+ return 0;
54611+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54612+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54613+ return 1;
54614+ }
54615+
54616+ return 1;
54617+}
54618+
54619+int
54620+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54621+{
54622+ __u32 mode;
54623+
54624+ if (unlikely(!file || !(prot & PROT_EXEC)))
54625+ return 1;
54626+
54627+ if (is_writable_mmap(file))
54628+ return 0;
54629+
54630+ mode =
54631+ gr_search_file(file->f_path.dentry,
54632+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54633+ file->f_path.mnt);
54634+
54635+ if (!gr_tpe_allow(file))
54636+ return 0;
54637+
54638+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54639+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54640+ return 0;
54641+ } else if (unlikely(!(mode & GR_EXEC))) {
54642+ return 0;
54643+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54644+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54645+ return 1;
54646+ }
54647+
54648+ return 1;
54649+}
54650+
54651+void
54652+gr_acl_handle_psacct(struct task_struct *task, const long code)
54653+{
54654+ unsigned long runtime;
54655+ unsigned long cputime;
54656+ unsigned int wday, cday;
54657+ __u8 whr, chr;
54658+ __u8 wmin, cmin;
54659+ __u8 wsec, csec;
54660+ struct timespec timeval;
54661+
54662+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54663+ !(task->acl->mode & GR_PROCACCT)))
54664+ return;
54665+
54666+ do_posix_clock_monotonic_gettime(&timeval);
54667+ runtime = timeval.tv_sec - task->start_time.tv_sec;
54668+ wday = runtime / (3600 * 24);
54669+ runtime -= wday * (3600 * 24);
54670+ whr = runtime / 3600;
54671+ runtime -= whr * 3600;
54672+ wmin = runtime / 60;
54673+ runtime -= wmin * 60;
54674+ wsec = runtime;
54675+
54676+ cputime = (task->utime + task->stime) / HZ;
54677+ cday = cputime / (3600 * 24);
54678+ cputime -= cday * (3600 * 24);
54679+ chr = cputime / 3600;
54680+ cputime -= chr * 3600;
54681+ cmin = cputime / 60;
54682+ cputime -= cmin * 60;
54683+ csec = cputime;
54684+
54685+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54686+
54687+ return;
54688+}
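The process-accounting hook above reduces to integer arithmetic: wall-clock runtime and CPU time are each split into days, hours, minutes and seconds by repeated division. A minimal self-contained userspace sketch of that decomposition (names and the sample value are illustrative, not from the patch):

#include <stdio.h>

/* split a duration in seconds into days/hours/minutes/seconds, mirroring
   the runtime/cputime decomposition in gr_acl_handle_psacct() above */
static void split_time(unsigned long secs, unsigned int *d, unsigned int *h,
                       unsigned int *m, unsigned int *s)
{
        *d = secs / (3600 * 24);
        secs -= (unsigned long)*d * (3600 * 24);
        *h = secs / 3600;
        secs -= (unsigned long)*h * 3600;
        *m = secs / 60;
        *s = secs - (unsigned long)*m * 60;
}

int main(void)
{
        unsigned int d, h, m, s;

        split_time(93784, &d, &h, &m, &s);       /* expect 1d 2h 3m 4s */
        printf("%ud %uh %um %us\n", d, h, m, s);
        return 0;
}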
54689+
54690+void gr_set_kernel_label(struct task_struct *task)
54691+{
54692+ if (gr_status & GR_READY) {
54693+ task->role = kernel_role;
54694+ task->acl = kernel_role->root_label;
54695+ }
54696+ return;
54697+}
54698+
54699+#ifdef CONFIG_TASKSTATS
54700+int gr_is_taskstats_denied(int pid)
54701+{
54702+ struct task_struct *task;
54703+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54704+ const struct cred *cred;
54705+#endif
54706+ int ret = 0;
54707+
54708+ /* restrict taskstats viewing to un-chrooted root users
54709+ who have the 'view' subject flag if the RBAC system is enabled
54710+ */
54711+
df50ba0c 54712+ rcu_read_lock();
58c5fc13
MT
54713+ read_lock(&tasklist_lock);
54714+ task = find_task_by_vpid(pid);
54715+ if (task) {
58c5fc13
MT
54716+#ifdef CONFIG_GRKERNSEC_CHROOT
54717+ if (proc_is_chrooted(task))
54718+ ret = -EACCES;
54719+#endif
54720+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54721+ cred = __task_cred(task);
54722+#ifdef CONFIG_GRKERNSEC_PROC_USER
54723+ if (cred->uid != 0)
54724+ ret = -EACCES;
54725+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54726+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54727+ ret = -EACCES;
54728+#endif
54729+#endif
54730+ if (gr_status & GR_READY) {
54731+ if (!(task->acl->mode & GR_VIEW))
54732+ ret = -EACCES;
54733+ }
58c5fc13
MT
54734+ } else
54735+ ret = -ENOENT;
54736+
54737+ read_unlock(&tasklist_lock);
df50ba0c 54738+ rcu_read_unlock();
58c5fc13
MT
54739+
54740+ return ret;
54741+}
54742+#endif
54743+
bc901d79
MT
54744+/* AUXV entries are filled via a descendant of search_binary_handler
54745+ after we've already applied the subject for the target
54746+*/
54747+int gr_acl_enable_at_secure(void)
54748+{
54749+ if (unlikely(!(gr_status & GR_READY)))
54750+ return 0;
54751+
54752+ if (current->acl->mode & GR_ATSECURE)
54753+ return 1;
54754+
54755+ return 0;
54756+}
54757+
58c5fc13
MT
54758+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54759+{
54760+ struct task_struct *task = current;
54761+ struct dentry *dentry = file->f_path.dentry;
54762+ struct vfsmount *mnt = file->f_path.mnt;
54763+ struct acl_object_label *obj, *tmp;
54764+ struct acl_subject_label *subj;
54765+ unsigned int bufsize;
54766+ int is_not_root;
54767+ char *path;
16454cff 54768+ dev_t dev = __get_dev(dentry);
58c5fc13
MT
54769+
54770+ if (unlikely(!(gr_status & GR_READY)))
54771+ return 1;
54772+
54773+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54774+ return 1;
54775+
54776+ /* ignore Eric Biederman */
54777+ if (IS_PRIVATE(dentry->d_inode))
54778+ return 1;
54779+
54780+ subj = task->acl;
54781+ do {
16454cff 54782+ obj = lookup_acl_obj_label(ino, dev, subj);
58c5fc13
MT
54783+ if (obj != NULL)
54784+ return (obj->mode & GR_FIND) ? 1 : 0;
54785+ } while ((subj = subj->parent_subject));
54786+
54787+ /* this is purely an optimization since we're looking for an object
54788+ for the directory we're doing a readdir on;
54789+ if it's possible for any globbed object to match the entry we're
54790+ filling into the directory, then the object we find here will be
54791+ an anchor point with attached globbed objects
54792+ */
54793+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54794+ if (obj->globbed == NULL)
54795+ return (obj->mode & GR_FIND) ? 1 : 0;
54796+
54797+ is_not_root = ((obj->filename[0] == '/') &&
54798+ (obj->filename[1] == '\0')) ? 0 : 1;
54799+ bufsize = PAGE_SIZE - namelen - is_not_root;
54800+
54801+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
54802+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54803+ return 1;
54804+
54805+ preempt_disable();
54806+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54807+ bufsize);
54808+
54809+ bufsize = strlen(path);
54810+
54811+ /* if base is "/", don't append an additional slash */
54812+ if (is_not_root)
54813+ *(path + bufsize) = '/';
54814+ memcpy(path + bufsize + is_not_root, name, namelen);
54815+ *(path + bufsize + namelen + is_not_root) = '\0';
54816+
54817+ tmp = obj->globbed;
54818+ while (tmp) {
54819+ if (!glob_match(tmp->filename, path)) {
54820+ preempt_enable();
54821+ return (tmp->mode & GR_FIND) ? 1 : 0;
54822+ }
54823+ tmp = tmp->next;
54824+ }
54825+ preempt_enable();
54826+ return (obj->mode & GR_FIND) ? 1 : 0;
54827+}
54828+
6892158b
MT
54829+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54830+EXPORT_SYMBOL(gr_acl_is_enabled);
54831+#endif
58c5fc13
MT
54832+EXPORT_SYMBOL(gr_learn_resource);
54833+EXPORT_SYMBOL(gr_set_kernel_label);
54834+#ifdef CONFIG_SECURITY
54835+EXPORT_SYMBOL(gr_check_user_change);
54836+EXPORT_SYMBOL(gr_check_group_change);
54837+#endif
54838+
fe2de317
MT
54839diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54840new file mode 100644
54841index 0000000..34fefda
54842--- /dev/null
54843+++ b/grsecurity/gracl_alloc.c
54844@@ -0,0 +1,105 @@
54845+#include <linux/kernel.h>
54846+#include <linux/mm.h>
54847+#include <linux/slab.h>
54848+#include <linux/vmalloc.h>
54849+#include <linux/gracl.h>
54850+#include <linux/grsecurity.h>
54851+
54852+static unsigned long alloc_stack_next = 1;
54853+static unsigned long alloc_stack_size = 1;
54854+static void **alloc_stack;
54855+
54856+static __inline__ int
54857+alloc_pop(void)
54858+{
54859+ if (alloc_stack_next == 1)
54860+ return 0;
54861+
54862+ kfree(alloc_stack[alloc_stack_next - 2]);
54863+
54864+ alloc_stack_next--;
54865+
54866+ return 1;
54867+}
54868+
54869+static __inline__ int
54870+alloc_push(void *buf)
54871+{
54872+ if (alloc_stack_next >= alloc_stack_size)
54873+ return 1;
54874+
54875+ alloc_stack[alloc_stack_next - 1] = buf;
54876+
54877+ alloc_stack_next++;
54878+
54879+ return 0;
54880+}
54881+
54882+void *
54883+acl_alloc(unsigned long len)
54884+{
54885+ void *ret = NULL;
54886+
54887+ if (!len || len > PAGE_SIZE)
54888+ goto out;
54889+
54890+ ret = kmalloc(len, GFP_KERNEL);
54891+
54892+ if (ret) {
54893+ if (alloc_push(ret)) {
54894+ kfree(ret);
54895+ ret = NULL;
54896+ }
54897+ }
54898+
54899+out:
54900+ return ret;
54901+}
54902+
54903+void *
54904+acl_alloc_num(unsigned long num, unsigned long len)
54905+{
54906+ if (!len || (num > (PAGE_SIZE / len)))
54907+ return NULL;
54908+
54909+ return acl_alloc(num * len);
54910+}
54911+
54912+void
54913+acl_free_all(void)
54914+{
54915+ if (gr_acl_is_enabled() || !alloc_stack)
54916+ return;
54917+
54918+ while (alloc_pop()) ;
54919+
54920+ if (alloc_stack) {
54921+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54922+ kfree(alloc_stack);
54923+ else
54924+ vfree(alloc_stack);
54925+ }
54926+
54927+ alloc_stack = NULL;
54928+ alloc_stack_size = 1;
54929+ alloc_stack_next = 1;
54930+
54931+ return;
54932+}
54933+
54934+int
54935+acl_alloc_stack_init(unsigned long size)
54936+{
54937+ if ((size * sizeof (void *)) <= PAGE_SIZE)
54938+ alloc_stack =
54939+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54940+ else
54941+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
54942+
54943+ alloc_stack_size = size;
54944+
54945+ if (!alloc_stack)
54946+ return 0;
54947+ else
54948+ return 1;
54949+}
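gracl_alloc.c above is a small arena allocator for the RBAC policy: acl_alloc() remembers every pointer on a fixed-size stack so that acl_free_all() can release the whole policy in one sweep when it is unloaded. A rough userspace sketch of that push/free-all discipline, using plain malloc/free (the names and the 16-slot size are illustrative, not from the patch):

#include <stdlib.h>
#include <string.h>

static void **stack;
static unsigned long stack_next = 1, stack_size = 1;

/* mirrors acl_alloc_stack_init(): reserve the pointer stack up front */
static int arena_init(unsigned long size)
{
        stack = malloc(size * sizeof(void *));
        stack_size = size;
        return stack != NULL;
}

/* mirrors acl_alloc(): allocate and remember the pointer for the bulk free */
static void *arena_alloc(size_t len)
{
        void *p;

        if (!len || stack_next >= stack_size)
                return NULL;
        p = malloc(len);
        if (p) {
                stack[stack_next - 1] = p;
                stack_next++;
        }
        return p;
}

/* mirrors acl_free_all(): pop and free everything, then drop the stack itself */
static void arena_free_all(void)
{
        while (stack_next > 1) {
                stack_next--;
                free(stack[stack_next - 1]);
        }
        free(stack);
        stack = NULL;
        stack_size = stack_next = 1;
}

int main(void)
{
        void *p;

        if (!arena_init(16))
                return 1;
        p = arena_alloc(64);
        if (p)
                memset(p, 0, 64);
        arena_free_all();
        return 0;
}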
54950diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54951new file mode 100644
54952index 0000000..955ddfb
54953--- /dev/null
54954+++ b/grsecurity/gracl_cap.c
15a11c5b 54955@@ -0,0 +1,101 @@
58c5fc13
MT
54956+#include <linux/kernel.h>
54957+#include <linux/module.h>
54958+#include <linux/sched.h>
54959+#include <linux/gracl.h>
54960+#include <linux/grsecurity.h>
54961+#include <linux/grinternal.h>
54962+
15a11c5b
MT
54963+extern const char *captab_log[];
54964+extern int captab_log_entries;
58c5fc13
MT
54965+
54966+int
15a11c5b 54967+gr_acl_is_capable(const int cap)
58c5fc13
MT
54968+{
54969+ struct task_struct *task = current;
54970+ const struct cred *cred = current_cred();
54971+ struct acl_subject_label *curracl;
54972+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
df50ba0c 54973+ kernel_cap_t cap_audit = __cap_empty_set;
58c5fc13
MT
54974+
54975+ if (!gr_acl_is_enabled())
54976+ return 1;
54977+
54978+ curracl = task->acl;
54979+
54980+ cap_drop = curracl->cap_lower;
54981+ cap_mask = curracl->cap_mask;
df50ba0c 54982+ cap_audit = curracl->cap_invert_audit;
58c5fc13
MT
54983+
54984+ while ((curracl = curracl->parent_subject)) {
54985+ /* if the cap isn't specified in the current computed mask but is specified in the
54986+ current level subject, and is lowered in the current level subject, then add
54987+ it to the set of dropped capabilities;
54988+ otherwise, add the current level subject's mask to the current computed mask
54989+ */
54990+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54991+ cap_raise(cap_mask, cap);
54992+ if (cap_raised(curracl->cap_lower, cap))
54993+ cap_raise(cap_drop, cap);
df50ba0c
MT
54994+ if (cap_raised(curracl->cap_invert_audit, cap))
54995+ cap_raise(cap_audit, cap);
58c5fc13
MT
54996+ }
54997+ }
54998+
df50ba0c
MT
54999+ if (!cap_raised(cap_drop, cap)) {
55000+ if (cap_raised(cap_audit, cap))
55001+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
58c5fc13 55002+ return 1;
df50ba0c 55003+ }
58c5fc13
MT
55004+
55005+ curracl = task->acl;
55006+
55007+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55008+ && cap_raised(cred->cap_effective, cap)) {
55009+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55010+ task->role->roletype, cred->uid,
55011+ cred->gid, task->exec_file ?
55012+ gr_to_filename(task->exec_file->f_path.dentry,
55013+ task->exec_file->f_path.mnt) : curracl->filename,
55014+ curracl->filename, 0UL,
bc901d79 55015+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
58c5fc13
MT
55016+ return 1;
55017+ }
55018+
15a11c5b 55019+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
58c5fc13
MT
55020+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
55021+ return 0;
55022+}
55023+
55024+int
15a11c5b 55025+gr_acl_is_capable_nolog(const int cap)
58c5fc13
MT
55026+{
55027+ struct acl_subject_label *curracl;
55028+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55029+
55030+ if (!gr_acl_is_enabled())
55031+ return 1;
55032+
55033+ curracl = current->acl;
55034+
55035+ cap_drop = curracl->cap_lower;
55036+ cap_mask = curracl->cap_mask;
55037+
55038+ while ((curracl = curracl->parent_subject)) {
55039+ /* if the cap isn't specified in the current computed mask but is specified in the
55040+ current level subject, and is lowered in the current level subject, then add
55041+ it to the set of dropped capabilities;
55042+ otherwise, add the current level subject's mask to the current computed mask
55043+ */
55044+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55045+ cap_raise(cap_mask, cap);
55046+ if (cap_raised(curracl->cap_lower, cap))
55047+ cap_raise(cap_drop, cap);
55048+ }
55049+ }
55050+
55051+ if (!cap_raised(cap_drop, cap))
55052+ return 1;
55053+
55054+ return 0;
55055+}
55056+
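gr_acl_is_capable() above resolves a capability by walking from the task's subject up through its parent subjects: the first subject whose mask mentions the capability decides it, and that subject's lower set marks it as dropped. A simplified standalone sketch of the walk, using a 64-bit bitmask in place of kernel_cap_t (struct layout and names are illustrative):

#include <stdio.h>
#include <stdint.h>

struct subject {
        struct subject *parent;
        uint64_t cap_mask;   /* capabilities this subject says anything about */
        uint64_t cap_lower;  /* capabilities this subject drops */
};

/* first subject in the parent chain whose mask mentions the cap decides it */
static int cap_allowed(const struct subject *s, int cap)
{
        uint64_t bit = 1ULL << cap;
        uint64_t mask = s->cap_mask;
        uint64_t drop = s->cap_lower;

        for (s = s->parent; s != NULL; s = s->parent) {
                if (!(mask & bit) && (s->cap_mask & bit)) {
                        mask |= bit;
                        if (s->cap_lower & bit)
                                drop |= bit;
                }
        }
        return !(drop & bit);
}

int main(void)
{
        struct subject root = { NULL, 1ULL << 21, 1ULL << 21 }; /* drops cap 21 */
        struct subject leaf = { &root, 0, 0 };                  /* inherits that */

        printf("cap 21: %d\n", cap_allowed(&leaf, 21)); /* 0 - denied via parent */
        printf("cap  7: %d\n", cap_allowed(&leaf, 7));  /* 1 - nothing drops it */
        return 0;
}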
fe2de317
MT
55057diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55058new file mode 100644
55059index 0000000..4eda5c3
55060--- /dev/null
55061+++ b/grsecurity/gracl_fs.c
6e9df6a3 55062@@ -0,0 +1,433 @@
58c5fc13
MT
55063+#include <linux/kernel.h>
55064+#include <linux/sched.h>
55065+#include <linux/types.h>
55066+#include <linux/fs.h>
55067+#include <linux/file.h>
55068+#include <linux/stat.h>
55069+#include <linux/grsecurity.h>
55070+#include <linux/grinternal.h>
55071+#include <linux/gracl.h>
55072+
55073+__u32
55074+gr_acl_handle_hidden_file(const struct dentry * dentry,
55075+ const struct vfsmount * mnt)
55076+{
55077+ __u32 mode;
55078+
55079+ if (unlikely(!dentry->d_inode))
55080+ return GR_FIND;
55081+
55082+ mode =
55083+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55084+
55085+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55086+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55087+ return mode;
55088+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55089+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55090+ return 0;
55091+ } else if (unlikely(!(mode & GR_FIND)))
55092+ return 0;
55093+
55094+ return GR_FIND;
55095+}
55096+
55097+__u32
55098+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
6e9df6a3 55099+ int acc_mode)
58c5fc13
MT
55100+{
55101+ __u32 reqmode = GR_FIND;
55102+ __u32 mode;
55103+
55104+ if (unlikely(!dentry->d_inode))
55105+ return reqmode;
55106+
6e9df6a3 55107+ if (acc_mode & MAY_APPEND)
58c5fc13 55108+ reqmode |= GR_APPEND;
6e9df6a3 55109+ else if (acc_mode & MAY_WRITE)
58c5fc13 55110+ reqmode |= GR_WRITE;
6e9df6a3 55111+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
58c5fc13 55112+ reqmode |= GR_READ;
6e9df6a3 55113+
58c5fc13
MT
55114+ mode =
55115+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55116+ mnt);
55117+
55118+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55119+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55120+ reqmode & GR_READ ? " reading" : "",
55121+ reqmode & GR_WRITE ? " writing" : reqmode &
55122+ GR_APPEND ? " appending" : "");
55123+ return reqmode;
55124+ } else
55125+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55126+ {
55127+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55128+ reqmode & GR_READ ? " reading" : "",
55129+ reqmode & GR_WRITE ? " writing" : reqmode &
55130+ GR_APPEND ? " appending" : "");
55131+ return 0;
55132+ } else if (unlikely((mode & reqmode) != reqmode))
55133+ return 0;
55134+
55135+ return reqmode;
55136+}
55137+
55138+__u32
55139+gr_acl_handle_creat(const struct dentry * dentry,
55140+ const struct dentry * p_dentry,
6e9df6a3 55141+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58c5fc13
MT
55142+ const int imode)
55143+{
55144+ __u32 reqmode = GR_WRITE | GR_CREATE;
55145+ __u32 mode;
55146+
6e9df6a3 55147+ if (acc_mode & MAY_APPEND)
58c5fc13 55148+ reqmode |= GR_APPEND;
6e9df6a3
MT
55149+ // if a directory was required or the directory already exists, then
55150+ // don't count this open as a read
55151+ if ((acc_mode & MAY_READ) &&
55152+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
58c5fc13 55153+ reqmode |= GR_READ;
6e9df6a3 55154+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
58c5fc13
MT
55155+ reqmode |= GR_SETID;
55156+
55157+ mode =
55158+ gr_check_create(dentry, p_dentry, p_mnt,
55159+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55160+
55161+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55162+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55163+ reqmode & GR_READ ? " reading" : "",
55164+ reqmode & GR_WRITE ? " writing" : reqmode &
55165+ GR_APPEND ? " appending" : "");
55166+ return reqmode;
55167+ } else
55168+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55169+ {
55170+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55171+ reqmode & GR_READ ? " reading" : "",
55172+ reqmode & GR_WRITE ? " writing" : reqmode &
55173+ GR_APPEND ? " appending" : "");
55174+ return 0;
55175+ } else if (unlikely((mode & reqmode) != reqmode))
55176+ return 0;
55177+
55178+ return reqmode;
55179+}
55180+
55181+__u32
55182+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55183+ const int fmode)
55184+{
55185+ __u32 mode, reqmode = GR_FIND;
55186+
55187+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55188+ reqmode |= GR_EXEC;
55189+ if (fmode & S_IWOTH)
55190+ reqmode |= GR_WRITE;
55191+ if (fmode & S_IROTH)
55192+ reqmode |= GR_READ;
55193+
55194+ mode =
55195+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55196+ mnt);
55197+
55198+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55199+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55200+ reqmode & GR_READ ? " reading" : "",
55201+ reqmode & GR_WRITE ? " writing" : "",
55202+ reqmode & GR_EXEC ? " executing" : "");
55203+ return reqmode;
55204+ } else
55205+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55206+ {
55207+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55208+ reqmode & GR_READ ? " reading" : "",
55209+ reqmode & GR_WRITE ? " writing" : "",
55210+ reqmode & GR_EXEC ? " executing" : "");
55211+ return 0;
55212+ } else if (unlikely((mode & reqmode) != reqmode))
55213+ return 0;
55214+
55215+ return reqmode;
55216+}
55217+
55218+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55219+{
55220+ __u32 mode;
55221+
55222+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55223+
55224+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55225+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55226+ return mode;
55227+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55228+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55229+ return 0;
55230+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55231+ return 0;
55232+
55233+ return (reqmode);
55234+}
55235+
55236+__u32
55237+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55238+{
55239+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55240+}
55241+
55242+__u32
55243+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55244+{
55245+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55246+}
55247+
55248+__u32
55249+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55250+{
55251+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55252+}
55253+
55254+__u32
55255+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55256+{
55257+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55258+}
55259+
55260+__u32
55261+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
55262+ mode_t mode)
55263+{
55264+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55265+ return 1;
55266+
55267+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55268+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55269+ GR_FCHMOD_ACL_MSG);
55270+ } else {
55271+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
55272+ }
55273+}
55274+
55275+__u32
55276+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55277+ mode_t mode)
55278+{
55279+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55280+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55281+ GR_CHMOD_ACL_MSG);
55282+ } else {
55283+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55284+ }
55285+}
55286+
55287+__u32
55288+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55289+{
55290+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55291+}
55292+
55293+__u32
bc901d79
MT
55294+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55295+{
55296+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55297+}
55298+
55299+__u32
58c5fc13
MT
55300+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55301+{
55302+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55303+}
55304+
55305+__u32
55306+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55307+{
55308+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55309+ GR_UNIXCONNECT_ACL_MSG);
55310+}
55311+
6e9df6a3 55312+/* hardlinks require at minimum create and link permission,
58c5fc13
MT
55313+ any additional privilege required is based on the
55314+ privilege of the file being linked to
55315+*/
55316+__u32
55317+gr_acl_handle_link(const struct dentry * new_dentry,
55318+ const struct dentry * parent_dentry,
55319+ const struct vfsmount * parent_mnt,
55320+ const struct dentry * old_dentry,
55321+ const struct vfsmount * old_mnt, const char *to)
55322+{
55323+ __u32 mode;
55324+ __u32 needmode = GR_CREATE | GR_LINK;
55325+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55326+
55327+ mode =
55328+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55329+ old_mnt);
55330+
55331+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55332+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55333+ return mode;
55334+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55335+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55336+ return 0;
55337+ } else if (unlikely((mode & needmode) != needmode))
55338+ return 0;
55339+
55340+ return 1;
55341+}
55342+
55343+__u32
55344+gr_acl_handle_symlink(const struct dentry * new_dentry,
55345+ const struct dentry * parent_dentry,
55346+ const struct vfsmount * parent_mnt, const char *from)
55347+{
55348+ __u32 needmode = GR_WRITE | GR_CREATE;
55349+ __u32 mode;
55350+
55351+ mode =
55352+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
55353+ GR_CREATE | GR_AUDIT_CREATE |
55354+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55355+
55356+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55357+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55358+ return mode;
55359+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55360+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55361+ return 0;
55362+ } else if (unlikely((mode & needmode) != needmode))
55363+ return 0;
55364+
55365+ return (GR_WRITE | GR_CREATE);
55366+}
55367+
55368+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55369+{
55370+ __u32 mode;
55371+
55372+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55373+
55374+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55375+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55376+ return mode;
55377+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55378+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55379+ return 0;
55380+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55381+ return 0;
55382+
55383+ return (reqmode);
55384+}
55385+
55386+__u32
55387+gr_acl_handle_mknod(const struct dentry * new_dentry,
55388+ const struct dentry * parent_dentry,
55389+ const struct vfsmount * parent_mnt,
55390+ const int mode)
55391+{
55392+ __u32 reqmode = GR_WRITE | GR_CREATE;
55393+ if (unlikely(mode & (S_ISUID | S_ISGID)))
55394+ reqmode |= GR_SETID;
55395+
55396+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55397+ reqmode, GR_MKNOD_ACL_MSG);
55398+}
55399+
55400+__u32
55401+gr_acl_handle_mkdir(const struct dentry *new_dentry,
55402+ const struct dentry *parent_dentry,
55403+ const struct vfsmount *parent_mnt)
55404+{
55405+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55406+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55407+}
55408+
55409+#define RENAME_CHECK_SUCCESS(old, new) \
55410+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55411+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55412+
55413+int
55414+gr_acl_handle_rename(struct dentry *new_dentry,
55415+ struct dentry *parent_dentry,
55416+ const struct vfsmount *parent_mnt,
55417+ struct dentry *old_dentry,
55418+ struct inode *old_parent_inode,
55419+ struct vfsmount *old_mnt, const char *newname)
55420+{
55421+ __u32 comp1, comp2;
55422+ int error = 0;
55423+
55424+ if (unlikely(!gr_acl_is_enabled()))
55425+ return 0;
55426+
55427+ if (!new_dentry->d_inode) {
55428+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55429+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55430+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55431+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55432+ GR_DELETE | GR_AUDIT_DELETE |
55433+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55434+ GR_SUPPRESS, old_mnt);
55435+ } else {
55436+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55437+ GR_CREATE | GR_DELETE |
55438+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55439+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55440+ GR_SUPPRESS, parent_mnt);
55441+ comp2 =
55442+ gr_search_file(old_dentry,
55443+ GR_READ | GR_WRITE | GR_AUDIT_READ |
55444+ GR_DELETE | GR_AUDIT_DELETE |
55445+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55446+ }
55447+
55448+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55449+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55450+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55451+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55452+ && !(comp2 & GR_SUPPRESS)) {
55453+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55454+ error = -EACCES;
55455+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55456+ error = -EACCES;
55457+
55458+ return error;
55459+}
55460+
55461+void
55462+gr_acl_handle_exit(void)
55463+{
55464+ u16 id;
55465+ char *rolename;
55466+ struct file *exec_file;
55467+
16454cff
MT
55468+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55469+ !(current->role->roletype & GR_ROLE_PERSIST))) {
58c5fc13
MT
55470+ id = current->acl_role_id;
55471+ rolename = current->role->rolename;
55472+ gr_set_acls(1);
55473+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55474+ }
55475+
55476+ write_lock(&grsec_exec_file_lock);
55477+ exec_file = current->exec_file;
55478+ current->exec_file = NULL;
55479+ write_unlock(&grsec_exec_file_lock);
55480+
55481+ if (exec_file)
55482+ fput(exec_file);
55483+}
55484+
55485+int
55486+gr_acl_handle_procpidmem(const struct task_struct *task)
55487+{
55488+ if (unlikely(!gr_acl_is_enabled()))
55489+ return 0;
55490+
55491+ if (task != current && task->acl->mode & GR_PROTPROCFD)
55492+ return -EACCES;
55493+
55494+ return 0;
55495+}
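Every gr_acl_handle_*() wrapper in gracl_fs.c above funnels through the same four-way decision that generic_fs_handler() encodes: audited allow, logged deny, suppressed deny, or silent allow, depending on how the granted mode bits compare to the required ones. A compact standalone sketch of that decision table (the GR_* bit values below are made up for illustration, not the kernel's):

#include <stdio.h>

#define GR_AUDITS   0x100  /* illustrative bit values only */
#define GR_SUPPRESS 0x200

enum outcome { ALLOW_AUDIT, DENY_LOGGED, DENY_SILENT, ALLOW_SILENT };

/* mirrors the if/else ladder of generic_fs_handler(): 'mode' is what the
   policy granted, 'reqmode' is what the operation needs */
static enum outcome decide(unsigned int mode, unsigned int reqmode)
{
        if (((mode & reqmode) == reqmode) && (mode & GR_AUDITS))
                return ALLOW_AUDIT;              /* allowed, auditing requested */
        if (((mode & reqmode) != reqmode) && !(mode & GR_SUPPRESS))
                return DENY_LOGGED;              /* denied, not suppressed */
        if ((mode & reqmode) != reqmode)
                return DENY_SILENT;              /* denied, log suppressed */
        return ALLOW_SILENT;                     /* allowed, nothing to log */
}

int main(void)
{
        unsigned int req = 0x1 | 0x2;                 /* e.g. read | write */

        printf("%d\n", decide(req | GR_AUDITS, req)); /* 0: ALLOW_AUDIT */
        printf("%d\n", decide(0x1, req));             /* 1: DENY_LOGGED */
        printf("%d\n", decide(GR_SUPPRESS, req));     /* 2: DENY_SILENT */
        printf("%d\n", decide(req, req));             /* 3: ALLOW_SILENT */
        return 0;
}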
fe2de317
MT
55496diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55497new file mode 100644
55498index 0000000..17050ca
55499--- /dev/null
55500+++ b/grsecurity/gracl_ip.c
66a7e928 55501@@ -0,0 +1,381 @@
58c5fc13
MT
55502+#include <linux/kernel.h>
55503+#include <asm/uaccess.h>
55504+#include <asm/errno.h>
55505+#include <net/sock.h>
55506+#include <linux/file.h>
55507+#include <linux/fs.h>
55508+#include <linux/net.h>
55509+#include <linux/in.h>
55510+#include <linux/skbuff.h>
55511+#include <linux/ip.h>
55512+#include <linux/udp.h>
58c5fc13
MT
55513+#include <linux/types.h>
55514+#include <linux/sched.h>
55515+#include <linux/netdevice.h>
55516+#include <linux/inetdevice.h>
55517+#include <linux/gracl.h>
55518+#include <linux/grsecurity.h>
55519+#include <linux/grinternal.h>
55520+
55521+#define GR_BIND 0x01
55522+#define GR_CONNECT 0x02
55523+#define GR_INVERT 0x04
55524+#define GR_BINDOVERRIDE 0x08
55525+#define GR_CONNECTOVERRIDE 0x10
bc901d79 55526+#define GR_SOCK_FAMILY 0x20
58c5fc13 55527+
bc901d79 55528+static const char * gr_protocols[IPPROTO_MAX] = {
58c5fc13
MT
55529+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55530+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55531+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55532+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55533+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55534+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55535+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55536+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55537+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55538+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55539+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55540+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55541+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55542+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55543+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55544+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55545+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55546+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55547+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55548+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55549+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55550+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55551+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55552+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55553+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55554+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55555+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55556+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55557+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55558+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55559+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55560+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55561+ };
55562+
bc901d79 55563+static const char * gr_socktypes[SOCK_MAX] = {
58c5fc13
MT
55564+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55565+ "unknown:7", "unknown:8", "unknown:9", "packet"
55566+ };
55567+
bc901d79
MT
55568+static const char * gr_sockfamilies[AF_MAX+1] = {
55569+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55570+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
c52201e0
MT
55571+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55572+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
bc901d79
MT
55573+ };
55574+
58c5fc13
MT
55575+const char *
55576+gr_proto_to_name(unsigned char proto)
55577+{
55578+ return gr_protocols[proto];
55579+}
55580+
55581+const char *
55582+gr_socktype_to_name(unsigned char type)
55583+{
55584+ return gr_socktypes[type];
55585+}
55586+
bc901d79
MT
55587+const char *
55588+gr_sockfamily_to_name(unsigned char family)
55589+{
55590+ return gr_sockfamilies[family];
55591+}
55592+
58c5fc13
MT
55593+int
55594+gr_search_socket(const int domain, const int type, const int protocol)
55595+{
55596+ struct acl_subject_label *curr;
55597+ const struct cred *cred = current_cred();
55598+
55599+ if (unlikely(!gr_acl_is_enabled()))
55600+ goto exit;
55601+
bc901d79
MT
55602+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
55603+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
58c5fc13
MT
55604+ goto exit; // let the kernel handle it
55605+
55606+ curr = current->acl;
55607+
bc901d79
MT
55608+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55609+ /* the family is allowed, if this is PF_INET allow it only if
55610+ the extra sock type/protocol checks pass */
55611+ if (domain == PF_INET)
55612+ goto inet_check;
55613+ goto exit;
55614+ } else {
55615+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55616+ __u32 fakeip = 0;
55617+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55618+ current->role->roletype, cred->uid,
55619+ cred->gid, current->exec_file ?
55620+ gr_to_filename(current->exec_file->f_path.dentry,
55621+ current->exec_file->f_path.mnt) :
55622+ curr->filename, curr->filename,
55623+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55624+ &current->signal->saved_ip);
55625+ goto exit;
55626+ }
55627+ goto exit_fail;
55628+ }
55629+
55630+inet_check:
55631+ /* the rest of this checking is for IPv4 only */
58c5fc13
MT
55632+ if (!curr->ips)
55633+ goto exit;
55634+
55635+ if ((curr->ip_type & (1 << type)) &&
55636+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55637+ goto exit;
55638+
55639+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55640+ /* we don't place acls on raw sockets, and sometimes
55641+ dgram/ip sockets are opened for ioctl and not
55642+ bind/connect, so we'll fake a bind learn log */
55643+ if (type == SOCK_RAW || type == SOCK_PACKET) {
55644+ __u32 fakeip = 0;
55645+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55646+ current->role->roletype, cred->uid,
55647+ cred->gid, current->exec_file ?
55648+ gr_to_filename(current->exec_file->f_path.dentry,
55649+ current->exec_file->f_path.mnt) :
55650+ curr->filename, curr->filename,
ae4e228f 55651+ &fakeip, 0, type,
bc901d79 55652+ protocol, GR_CONNECT, &current->signal->saved_ip);
58c5fc13
MT
55653+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55654+ __u32 fakeip = 0;
55655+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55656+ current->role->roletype, cred->uid,
55657+ cred->gid, current->exec_file ?
55658+ gr_to_filename(current->exec_file->f_path.dentry,
55659+ current->exec_file->f_path.mnt) :
55660+ curr->filename, curr->filename,
ae4e228f 55661+ &fakeip, 0, type,
bc901d79 55662+ protocol, GR_BIND, &current->signal->saved_ip);
58c5fc13
MT
55663+ }
55664+ /* we'll log when they use connect or bind */
55665+ goto exit;
55666+ }
55667+
bc901d79
MT
55668+exit_fail:
55669+ if (domain == PF_INET)
55670+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55671+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
55672+ else
55673+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55674+ gr_socktype_to_name(type), protocol);
58c5fc13
MT
55675+
55676+ return 0;
bc901d79 55677+exit:
58c5fc13
MT
55678+ return 1;
55679+}
55680+
55681+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55682+{
55683+ if ((ip->mode & mode) &&
55684+ (ip_port >= ip->low) &&
55685+ (ip_port <= ip->high) &&
55686+ ((ntohl(ip_addr) & our_netmask) ==
55687+ (ntohl(our_addr) & our_netmask))
55688+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55689+ && (ip->type & (1 << type))) {
55690+ if (ip->mode & GR_INVERT)
55691+ return 2; // specifically denied
55692+ else
55693+ return 1; // allowed
55694+ }
55695+
55696+ return 0; // not specifically allowed, may continue parsing
55697+}
55698+
55699+static int
55700+gr_search_connectbind(const int full_mode, struct sock *sk,
55701+ struct sockaddr_in *addr, const int type)
55702+{
55703+ char iface[IFNAMSIZ] = {0};
55704+ struct acl_subject_label *curr;
55705+ struct acl_ip_label *ip;
55706+ struct inet_sock *isk;
55707+ struct net_device *dev;
55708+ struct in_device *idev;
55709+ unsigned long i;
55710+ int ret;
55711+ int mode = full_mode & (GR_BIND | GR_CONNECT);
55712+ __u32 ip_addr = 0;
55713+ __u32 our_addr;
55714+ __u32 our_netmask;
55715+ char *p;
55716+ __u16 ip_port = 0;
55717+ const struct cred *cred = current_cred();
55718+
55719+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55720+ return 0;
55721+
55722+ curr = current->acl;
55723+ isk = inet_sk(sk);
55724+
55725+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55726+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55727+ addr->sin_addr.s_addr = curr->inaddr_any_override;
ae4e228f 55728+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
58c5fc13
MT
55729+ struct sockaddr_in saddr;
55730+ int err;
55731+
55732+ saddr.sin_family = AF_INET;
55733+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
ae4e228f 55734+ saddr.sin_port = isk->inet_sport;
58c5fc13
MT
55735+
55736+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55737+ if (err)
55738+ return err;
55739+
55740+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55741+ if (err)
55742+ return err;
55743+ }
55744+
55745+ if (!curr->ips)
55746+ return 0;
55747+
55748+ ip_addr = addr->sin_addr.s_addr;
55749+ ip_port = ntohs(addr->sin_port);
55750+
55751+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55752+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55753+ current->role->roletype, cred->uid,
55754+ cred->gid, current->exec_file ?
55755+ gr_to_filename(current->exec_file->f_path.dentry,
55756+ current->exec_file->f_path.mnt) :
55757+ curr->filename, curr->filename,
ae4e228f 55758+ &ip_addr, ip_port, type,
bc901d79 55759+ sk->sk_protocol, mode, &current->signal->saved_ip);
58c5fc13
MT
55760+ return 0;
55761+ }
55762+
55763+ for (i = 0; i < curr->ip_num; i++) {
55764+ ip = *(curr->ips + i);
55765+ if (ip->iface != NULL) {
55766+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
55767+ p = strchr(iface, ':');
55768+ if (p != NULL)
55769+ *p = '\0';
55770+ dev = dev_get_by_name(sock_net(sk), iface);
55771+ if (dev == NULL)
55772+ continue;
55773+ idev = in_dev_get(dev);
55774+ if (idev == NULL) {
55775+ dev_put(dev);
55776+ continue;
55777+ }
55778+ rcu_read_lock();
55779+ for_ifa(idev) {
55780+ if (!strcmp(ip->iface, ifa->ifa_label)) {
55781+ our_addr = ifa->ifa_address;
55782+ our_netmask = 0xffffffff;
55783+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55784+ if (ret == 1) {
55785+ rcu_read_unlock();
55786+ in_dev_put(idev);
55787+ dev_put(dev);
55788+ return 0;
55789+ } else if (ret == 2) {
55790+ rcu_read_unlock();
55791+ in_dev_put(idev);
55792+ dev_put(dev);
55793+ goto denied;
55794+ }
55795+ }
55796+ } endfor_ifa(idev);
55797+ rcu_read_unlock();
55798+ in_dev_put(idev);
55799+ dev_put(dev);
55800+ } else {
55801+ our_addr = ip->addr;
55802+ our_netmask = ip->netmask;
55803+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55804+ if (ret == 1)
55805+ return 0;
55806+ else if (ret == 2)
55807+ goto denied;
55808+ }
55809+ }
55810+
55811+denied:
55812+ if (mode == GR_BIND)
ae4e228f 55813+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58c5fc13 55814+ else if (mode == GR_CONNECT)
ae4e228f 55815+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58c5fc13
MT
55816+
55817+ return -EACCES;
55818+}
55819+
55820+int
55821+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55822+{
55823+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55824+}
55825+
55826+int
55827+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55828+{
55829+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55830+}
55831+
55832+int gr_search_listen(struct socket *sock)
55833+{
55834+ struct sock *sk = sock->sk;
55835+ struct sockaddr_in addr;
55836+
ae4e228f
MT
55837+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55838+ addr.sin_port = inet_sk(sk)->inet_sport;
58c5fc13
MT
55839+
55840+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55841+}
55842+
55843+int gr_search_accept(struct socket *sock)
55844+{
55845+ struct sock *sk = sock->sk;
55846+ struct sockaddr_in addr;
55847+
ae4e228f
MT
55848+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55849+ addr.sin_port = inet_sk(sk)->inet_sport;
58c5fc13
MT
55850+
55851+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55852+}
55853+
55854+int
55855+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55856+{
55857+ if (addr)
55858+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55859+ else {
55860+ struct sockaddr_in sin;
55861+ const struct inet_sock *inet = inet_sk(sk);
55862+
ae4e228f
MT
55863+ sin.sin_addr.s_addr = inet->inet_daddr;
55864+ sin.sin_port = inet->inet_dport;
58c5fc13
MT
55865+
55866+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55867+ }
55868+}
55869+
55870+int
55871+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55872+{
55873+ struct sockaddr_in sin;
55874+
55875+ if (unlikely(skb->len < sizeof (struct udphdr)))
55876+ return 0; // skip this packet
55877+
55878+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55879+ sin.sin_port = udp_hdr(skb)->source;
55880+
55881+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55882+}
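check_ip_policy() above gives each ACL entry a three-way verdict — 0 to keep scanning, 1 for an explicit allow, 2 for an explicit deny via GR_INVERT — and gr_search_connectbind() walks curr->ips until one entry decides. A simplified standalone sketch of the per-entry match (port range plus netmask only; the protocol and socket-type bitmaps are omitted, and the struct and flag values are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define GR_INVERT 0x04  /* same meaning as above; value illustrative */

struct ip_rule {
        unsigned int mode;       /* GR_BIND/GR_CONNECT/GR_INVERT bits */
        uint16_t low, high;      /* allowed port range */
        uint32_t addr, netmask;  /* rule network, host byte order */
};

/* 0 = no opinion, 1 = allowed, 2 = explicitly denied (inverted rule) */
static int match_rule(const struct ip_rule *r, unsigned int mode,
                      uint32_t ip_be, uint16_t port)
{
        if ((r->mode & mode) &&
            port >= r->low && port <= r->high &&
            (ntohl(ip_be) & r->netmask) == (r->addr & r->netmask))
                return (r->mode & GR_INVERT) ? 2 : 1;
        return 0;
}

int main(void)
{
        /* connect allowed to 10.0.0.0/8, ports 1-1024 */
        struct ip_rule r = { 0x02, 1, 1024, 0x0a000000u, 0xff000000u };

        printf("%d\n", match_rule(&r, 0x02, htonl(0x0a010203u), 80)); /* 1 */
        printf("%d\n", match_rule(&r, 0x02, htonl(0xc0a80001u), 80)); /* 0 */
        return 0;
}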
fe2de317
MT
55883diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55884new file mode 100644
55885index 0000000..25f54ef
55886--- /dev/null
55887+++ b/grsecurity/gracl_learn.c
15a11c5b 55888@@ -0,0 +1,207 @@
58c5fc13
MT
55889+#include <linux/kernel.h>
55890+#include <linux/mm.h>
55891+#include <linux/sched.h>
55892+#include <linux/poll.h>
58c5fc13
MT
55893+#include <linux/string.h>
55894+#include <linux/file.h>
55895+#include <linux/types.h>
55896+#include <linux/vmalloc.h>
55897+#include <linux/grinternal.h>
55898+
55899+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55900+ size_t count, loff_t *ppos);
55901+extern int gr_acl_is_enabled(void);
55902+
55903+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55904+static int gr_learn_attached;
55905+
55906+/* use a 512k buffer */
55907+#define LEARN_BUFFER_SIZE (512 * 1024)
55908+
55909+static DEFINE_SPINLOCK(gr_learn_lock);
bc901d79 55910+static DEFINE_MUTEX(gr_learn_user_mutex);
58c5fc13
MT
55911+
55912+/* we need to maintain two buffers, so that the kernel context of grlearn
55913+ uses a mutex around the userspace copying, and the other kernel contexts
55914+ use a spinlock when copying into the buffer, since they cannot sleep
55915+*/
55916+static char *learn_buffer;
55917+static char *learn_buffer_user;
55918+static int learn_buffer_len;
55919+static int learn_buffer_user_len;
55920+
55921+static ssize_t
55922+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55923+{
55924+ DECLARE_WAITQUEUE(wait, current);
55925+ ssize_t retval = 0;
55926+
55927+ add_wait_queue(&learn_wait, &wait);
55928+ set_current_state(TASK_INTERRUPTIBLE);
55929+ do {
bc901d79 55930+ mutex_lock(&gr_learn_user_mutex);
58c5fc13
MT
55931+ spin_lock(&gr_learn_lock);
55932+ if (learn_buffer_len)
55933+ break;
55934+ spin_unlock(&gr_learn_lock);
bc901d79 55935+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
55936+ if (file->f_flags & O_NONBLOCK) {
55937+ retval = -EAGAIN;
55938+ goto out;
55939+ }
55940+ if (signal_pending(current)) {
55941+ retval = -ERESTARTSYS;
55942+ goto out;
55943+ }
55944+
55945+ schedule();
55946+ } while (1);
55947+
55948+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55949+ learn_buffer_user_len = learn_buffer_len;
55950+ retval = learn_buffer_len;
55951+ learn_buffer_len = 0;
55952+
55953+ spin_unlock(&gr_learn_lock);
55954+
55955+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55956+ retval = -EFAULT;
55957+
bc901d79 55958+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
55959+out:
55960+ set_current_state(TASK_RUNNING);
55961+ remove_wait_queue(&learn_wait, &wait);
55962+ return retval;
55963+}
55964+
55965+static unsigned int
55966+poll_learn(struct file * file, poll_table * wait)
55967+{
55968+ poll_wait(file, &learn_wait, wait);
55969+
55970+ if (learn_buffer_len)
55971+ return (POLLIN | POLLRDNORM);
55972+
55973+ return 0;
55974+}
55975+
55976+void
55977+gr_clear_learn_entries(void)
55978+{
55979+ char *tmp;
55980+
bc901d79 55981+ mutex_lock(&gr_learn_user_mutex);
15a11c5b
MT
55982+ spin_lock(&gr_learn_lock);
55983+ tmp = learn_buffer;
55984+ learn_buffer = NULL;
55985+ spin_unlock(&gr_learn_lock);
55986+ if (tmp)
55987+ vfree(tmp);
55988+ if (learn_buffer_user != NULL) {
55989+ vfree(learn_buffer_user);
55990+ learn_buffer_user = NULL;
55991+ }
55992+ learn_buffer_len = 0;
bc901d79 55993+ mutex_unlock(&gr_learn_user_mutex);
55994+
55995+ return;
55996+}
55997+
55998+void
55999+gr_add_learn_entry(const char *fmt, ...)
56000+{
56001+ va_list args;
56002+ unsigned int len;
56003+
56004+ if (!gr_learn_attached)
56005+ return;
56006+
56007+ spin_lock(&gr_learn_lock);
56008+
56009+ /* leave a gap at the end so we know when it's "full" but don't have to
56010+ compute the exact length of the string we're trying to append
56011+ */
56012+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56013+ spin_unlock(&gr_learn_lock);
56014+ wake_up_interruptible(&learn_wait);
56015+ return;
56016+ }
56017+ if (learn_buffer == NULL) {
56018+ spin_unlock(&gr_learn_lock);
56019+ return;
56020+ }
56021+
56022+ va_start(args, fmt);
56023+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56024+ va_end(args);
56025+
56026+ learn_buffer_len += len + 1;
56027+
56028+ spin_unlock(&gr_learn_lock);
56029+ wake_up_interruptible(&learn_wait);
56030+
56031+ return;
56032+}
56033+
56034+static int
56035+open_learn(struct inode *inode, struct file *file)
56036+{
56037+ if (file->f_mode & FMODE_READ && gr_learn_attached)
56038+ return -EBUSY;
56039+ if (file->f_mode & FMODE_READ) {
56040+ int retval = 0;
bc901d79 56041+ mutex_lock(&gr_learn_user_mutex);
56042+ if (learn_buffer == NULL)
56043+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56044+ if (learn_buffer_user == NULL)
56045+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56046+ if (learn_buffer == NULL) {
56047+ retval = -ENOMEM;
56048+ goto out_error;
56049+ }
56050+ if (learn_buffer_user == NULL) {
56051+ retval = -ENOMEM;
56052+ goto out_error;
56053+ }
56054+ learn_buffer_len = 0;
56055+ learn_buffer_user_len = 0;
56056+ gr_learn_attached = 1;
56057+out_error:
bc901d79 56058+ mutex_unlock(&gr_learn_user_mutex);
56059+ return retval;
56060+ }
56061+ return 0;
56062+}
56063+
56064+static int
56065+close_learn(struct inode *inode, struct file *file)
56066+{
58c5fc13 56067+ if (file->f_mode & FMODE_READ) {
15a11c5b 56068+ char *tmp = NULL;
bc901d79 56069+ mutex_lock(&gr_learn_user_mutex);
56070+ spin_lock(&gr_learn_lock);
56071+ tmp = learn_buffer;
56072+ learn_buffer = NULL;
56073+ spin_unlock(&gr_learn_lock);
56074+ if (tmp)
58c5fc13 56075+ vfree(tmp);
56076+ if (learn_buffer_user != NULL) {
56077+ vfree(learn_buffer_user);
56078+ learn_buffer_user = NULL;
56079+ }
56080+ learn_buffer_len = 0;
56081+ learn_buffer_user_len = 0;
56082+ gr_learn_attached = 0;
bc901d79 56083+ mutex_unlock(&gr_learn_user_mutex);
56084+ }
56085+
56086+ return 0;
56087+}
56088+
56089+const struct file_operations grsec_fops = {
56090+ .read = read_learn,
56091+ .write = write_grsec_handler,
56092+ .open = open_learn,
56093+ .release = close_learn,
56094+ .poll = poll_learn,
56095+};
56096diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56097new file mode 100644
56098index 0000000..39645c9
56099--- /dev/null
56100+++ b/grsecurity/gracl_res.c
df50ba0c 56101@@ -0,0 +1,68 @@
56102+#include <linux/kernel.h>
56103+#include <linux/sched.h>
56104+#include <linux/gracl.h>
56105+#include <linux/grinternal.h>
56106+
56107+static const char *restab_log[] = {
56108+ [RLIMIT_CPU] = "RLIMIT_CPU",
56109+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56110+ [RLIMIT_DATA] = "RLIMIT_DATA",
56111+ [RLIMIT_STACK] = "RLIMIT_STACK",
56112+ [RLIMIT_CORE] = "RLIMIT_CORE",
56113+ [RLIMIT_RSS] = "RLIMIT_RSS",
56114+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
56115+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56116+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56117+ [RLIMIT_AS] = "RLIMIT_AS",
56118+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56119+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56120+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56121+ [RLIMIT_NICE] = "RLIMIT_NICE",
56122+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56123+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56124+ [GR_CRASH_RES] = "RLIMIT_CRASH"
56125+};
56126+
56127+void
56128+gr_log_resource(const struct task_struct *task,
56129+ const int res, const unsigned long wanted, const int gt)
56130+{
ae4e228f 56131+ const struct cred *cred;
df50ba0c 56132+ unsigned long rlim;
56133+
56134+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
56135+ return;
56136+
56137+ // not yet supported resource
56138+ if (unlikely(!restab_log[res]))
56139+ return;
56140+
56141+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56142+ rlim = task_rlimit_max(task, res);
56143+ else
56144+ rlim = task_rlimit(task, res);
56145+
56146+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56147+ return;
56148+
56149+ rcu_read_lock();
56150+ cred = __task_cred(task);
56151+
56152+ if (res == RLIMIT_NPROC &&
56153+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56154+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56155+ goto out_rcu_unlock;
56156+ else if (res == RLIMIT_MEMLOCK &&
56157+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56158+ goto out_rcu_unlock;
56159+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56160+ goto out_rcu_unlock;
56161+ rcu_read_unlock();
56162+
df50ba0c 56163+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56164+
56165+ return;
56166+out_rcu_unlock:
56167+ rcu_read_unlock();
56168+ return;
58c5fc13 56169+}
56170diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56171new file mode 100644
56172index 0000000..5556be3
56173--- /dev/null
56174+++ b/grsecurity/gracl_segv.c
66a7e928 56175@@ -0,0 +1,299 @@
56176+#include <linux/kernel.h>
56177+#include <linux/mm.h>
56178+#include <asm/uaccess.h>
56179+#include <asm/errno.h>
56180+#include <asm/mman.h>
56181+#include <net/sock.h>
56182+#include <linux/file.h>
56183+#include <linux/fs.h>
56184+#include <linux/net.h>
56185+#include <linux/in.h>
56186+#include <linux/slab.h>
56187+#include <linux/types.h>
56188+#include <linux/sched.h>
56189+#include <linux/timer.h>
56190+#include <linux/gracl.h>
56191+#include <linux/grsecurity.h>
56192+#include <linux/grinternal.h>
56193+
56194+static struct crash_uid *uid_set;
56195+static unsigned short uid_used;
56196+static DEFINE_SPINLOCK(gr_uid_lock);
56197+extern rwlock_t gr_inode_lock;
56198+extern struct acl_subject_label *
56199+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56200+ struct acl_role_label *role);
56201+
56202+#ifdef CONFIG_BTRFS_FS
56203+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56204+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56205+#endif
56206+
56207+static inline dev_t __get_dev(const struct dentry *dentry)
56208+{
56209+#ifdef CONFIG_BTRFS_FS
56210+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56211+ return get_btrfs_dev_from_inode(dentry->d_inode);
56212+ else
56213+#endif
56214+ return dentry->d_inode->i_sb->s_dev;
56215+}
56216+
56217+int
56218+gr_init_uidset(void)
56219+{
56220+ uid_set =
56221+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56222+ uid_used = 0;
56223+
56224+ return uid_set ? 1 : 0;
56225+}
56226+
56227+void
56228+gr_free_uidset(void)
56229+{
56230+ if (uid_set)
56231+ kfree(uid_set);
56232+
56233+ return;
56234+}
56235+
56236+int
56237+gr_find_uid(const uid_t uid)
56238+{
56239+ struct crash_uid *tmp = uid_set;
56240+ uid_t buid;
56241+ int low = 0, high = uid_used - 1, mid;
56242+
56243+ while (high >= low) {
56244+ mid = (low + high) >> 1;
56245+ buid = tmp[mid].uid;
56246+ if (buid == uid)
56247+ return mid;
56248+ if (buid > uid)
56249+ high = mid - 1;
56250+ if (buid < uid)
56251+ low = mid + 1;
56252+ }
56253+
56254+ return -1;
56255+}
56256+
56257+static __inline__ void
56258+gr_insertsort(void)
56259+{
56260+ unsigned short i, j;
56261+ struct crash_uid index;
56262+
56263+ for (i = 1; i < uid_used; i++) {
56264+ index = uid_set[i];
56265+ j = i;
56266+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56267+ uid_set[j] = uid_set[j - 1];
56268+ j--;
56269+ }
56270+ uid_set[j] = index;
56271+ }
56272+
56273+ return;
56274+}
56275+
56276+static __inline__ void
56277+gr_insert_uid(const uid_t uid, const unsigned long expires)
56278+{
56279+ int loc;
56280+
56281+ if (uid_used == GR_UIDTABLE_MAX)
56282+ return;
56283+
56284+ loc = gr_find_uid(uid);
56285+
56286+ if (loc >= 0) {
56287+ uid_set[loc].expires = expires;
56288+ return;
56289+ }
56290+
56291+ uid_set[uid_used].uid = uid;
56292+ uid_set[uid_used].expires = expires;
56293+ uid_used++;
56294+
56295+ gr_insertsort();
56296+
56297+ return;
56298+}
56299+
56300+void
56301+gr_remove_uid(const unsigned short loc)
56302+{
56303+ unsigned short i;
56304+
56305+ for (i = loc + 1; i < uid_used; i++)
56306+ uid_set[i - 1] = uid_set[i];
56307+
56308+ uid_used--;
56309+
56310+ return;
56311+}
56312+
56313+int
56314+gr_check_crash_uid(const uid_t uid)
56315+{
56316+ int loc;
56317+ int ret = 0;
56318+
56319+ if (unlikely(!gr_acl_is_enabled()))
56320+ return 0;
56321+
56322+ spin_lock(&gr_uid_lock);
56323+ loc = gr_find_uid(uid);
56324+
56325+ if (loc < 0)
56326+ goto out_unlock;
56327+
56328+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
56329+ gr_remove_uid(loc);
56330+ else
56331+ ret = 1;
56332+
56333+out_unlock:
56334+ spin_unlock(&gr_uid_lock);
56335+ return ret;
56336+}
56337+
56338+static __inline__ int
56339+proc_is_setxid(const struct cred *cred)
56340+{
56341+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
56342+ cred->uid != cred->fsuid)
56343+ return 1;
56344+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56345+ cred->gid != cred->fsgid)
56346+ return 1;
56347+
56348+ return 0;
56349+}
58c5fc13 56350+
71d190be 56351+extern int gr_fake_force_sig(int sig, struct task_struct *t);
56352+
56353+void
56354+gr_handle_crash(struct task_struct *task, const int sig)
56355+{
56356+ struct acl_subject_label *curr;
58c5fc13 56357+ struct task_struct *tsk, *tsk2;
ae4e228f 56358+ const struct cred *cred;
56359+ const struct cred *cred2;
56360+
56361+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56362+ return;
56363+
56364+ if (unlikely(!gr_acl_is_enabled()))
56365+ return;
56366+
56367+ curr = task->acl;
56368+
56369+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
56370+ return;
56371+
56372+ if (time_before_eq(curr->expires, get_seconds())) {
56373+ curr->expires = 0;
56374+ curr->crashes = 0;
56375+ }
56376+
56377+ curr->crashes++;
56378+
56379+ if (!curr->expires)
56380+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56381+
56382+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56383+ time_after(curr->expires, get_seconds())) {
56384+ rcu_read_lock();
56385+ cred = __task_cred(task);
56386+ if (cred->uid && proc_is_setxid(cred)) {
56387+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56388+ spin_lock(&gr_uid_lock);
56389+ gr_insert_uid(cred->uid, curr->expires);
56390+ spin_unlock(&gr_uid_lock);
56391+ curr->expires = 0;
56392+ curr->crashes = 0;
56393+ read_lock(&tasklist_lock);
56394+ do_each_thread(tsk2, tsk) {
56395+ cred2 = __task_cred(tsk);
56396+ if (tsk != task && cred2->uid == cred->uid)
56397+ gr_fake_force_sig(SIGKILL, tsk);
56398+ } while_each_thread(tsk2, tsk);
56399+ read_unlock(&tasklist_lock);
56400+ } else {
56401+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56402+ read_lock(&tasklist_lock);
6e9df6a3 56403+ read_lock(&grsec_exec_file_lock);
56404+ do_each_thread(tsk2, tsk) {
56405+ if (likely(tsk != task)) {
56406+ // if this thread has the same subject as the one that triggered
56407+ // RES_CRASH and it's the same binary, kill it
56408+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56409+ gr_fake_force_sig(SIGKILL, tsk);
56410+ }
56411+ } while_each_thread(tsk2, tsk);
6e9df6a3 56412+ read_unlock(&grsec_exec_file_lock);
56413+ read_unlock(&tasklist_lock);
56414+ }
ae4e228f 56415+ rcu_read_unlock();
56416+ }
56417+
56418+ return;
56419+}
56420+
56421+int
56422+gr_check_crash_exec(const struct file *filp)
56423+{
56424+ struct acl_subject_label *curr;
56425+
56426+ if (unlikely(!gr_acl_is_enabled()))
56427+ return 0;
56428+
56429+ read_lock(&gr_inode_lock);
56430+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
16454cff 56431+ __get_dev(filp->f_path.dentry),
58c5fc13
MT
56432+ current->role);
56433+ read_unlock(&gr_inode_lock);
56434+
56435+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56436+ (!curr->crashes && !curr->expires))
56437+ return 0;
56438+
56439+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56440+ time_after(curr->expires, get_seconds()))
56441+ return 1;
56442+ else if (time_before_eq(curr->expires, get_seconds())) {
56443+ curr->crashes = 0;
56444+ curr->expires = 0;
56445+ }
56446+
56447+ return 0;
56448+}
56449+
56450+void
56451+gr_handle_alertkill(struct task_struct *task)
56452+{
56453+ struct acl_subject_label *curracl;
56454+ __u32 curr_ip;
56455+ struct task_struct *p, *p2;
56456+
56457+ if (unlikely(!gr_acl_is_enabled()))
56458+ return;
56459+
56460+ curracl = task->acl;
56461+ curr_ip = task->signal->curr_ip;
56462+
56463+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56464+ read_lock(&tasklist_lock);
56465+ do_each_thread(p2, p) {
56466+ if (p->signal->curr_ip == curr_ip)
56467+ gr_fake_force_sig(SIGKILL, p);
56468+ } while_each_thread(p2, p);
56469+ read_unlock(&tasklist_lock);
56470+ } else if (curracl->mode & GR_KILLPROC)
56471+ gr_fake_force_sig(SIGKILL, task);
56472+
56473+ return;
56474+}
56475diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56476new file mode 100644
56477index 0000000..9d83a69
56478--- /dev/null
56479+++ b/grsecurity/gracl_shm.c
df50ba0c 56480@@ -0,0 +1,40 @@
56481+#include <linux/kernel.h>
56482+#include <linux/mm.h>
56483+#include <linux/sched.h>
56484+#include <linux/file.h>
56485+#include <linux/ipc.h>
56486+#include <linux/gracl.h>
56487+#include <linux/grsecurity.h>
56488+#include <linux/grinternal.h>
56489+
56490+int
56491+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56492+ const time_t shm_createtime, const uid_t cuid, const int shmid)
56493+{
56494+ struct task_struct *task;
56495+
56496+ if (!gr_acl_is_enabled())
56497+ return 1;
56498+
df50ba0c 56499+ rcu_read_lock();
56500+ read_lock(&tasklist_lock);
56501+
56502+ task = find_task_by_vpid(shm_cprid);
56503+
56504+ if (unlikely(!task))
56505+ task = find_task_by_vpid(shm_lapid);
56506+
56507+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56508+ (task->pid == shm_lapid)) &&
56509+ (task->acl->mode & GR_PROTSHM) &&
56510+ (task->acl != current->acl))) {
56511+ read_unlock(&tasklist_lock);
df50ba0c 56512+ rcu_read_unlock();
56513+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56514+ return 0;
56515+ }
56516+ read_unlock(&tasklist_lock);
df50ba0c 56517+ rcu_read_unlock();
56518+
56519+ return 1;
56520+}
56521diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56522new file mode 100644
56523index 0000000..bc0be01
56524--- /dev/null
56525+++ b/grsecurity/grsec_chdir.c
56526@@ -0,0 +1,19 @@
56527+#include <linux/kernel.h>
56528+#include <linux/sched.h>
56529+#include <linux/fs.h>
56530+#include <linux/file.h>
56531+#include <linux/grsecurity.h>
56532+#include <linux/grinternal.h>
56533+
56534+void
56535+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56536+{
56537+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56538+ if ((grsec_enable_chdir && grsec_enable_group &&
56539+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56540+ !grsec_enable_group)) {
56541+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56542+ }
56543+#endif
56544+ return;
56545+}
56546diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56547new file mode 100644
56548index 0000000..a2dc675
56549--- /dev/null
56550+++ b/grsecurity/grsec_chroot.c
15a11c5b 56551@@ -0,0 +1,351 @@
56552+#include <linux/kernel.h>
56553+#include <linux/module.h>
56554+#include <linux/sched.h>
56555+#include <linux/file.h>
56556+#include <linux/fs.h>
56557+#include <linux/mount.h>
56558+#include <linux/types.h>
56559+#include <linux/pid_namespace.h>
56560+#include <linux/grsecurity.h>
56561+#include <linux/grinternal.h>
56562+
56563+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56564+{
56565+#ifdef CONFIG_GRKERNSEC
56566+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56567+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
56568+ task->gr_is_chrooted = 1;
56569+ else
56570+ task->gr_is_chrooted = 0;
56571+
56572+ task->gr_chroot_dentry = path->dentry;
56573+#endif
56574+ return;
56575+}
56576+
56577+void gr_clear_chroot_entries(struct task_struct *task)
56578+{
56579+#ifdef CONFIG_GRKERNSEC
56580+ task->gr_is_chrooted = 0;
56581+ task->gr_chroot_dentry = NULL;
56582+#endif
56583+ return;
56584+}
56585+
58c5fc13 56586+int
15a11c5b 56587+gr_handle_chroot_unix(const pid_t pid)
56588+{
56589+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
6892158b 56590+ struct task_struct *p;
56591+
56592+ if (unlikely(!grsec_enable_chroot_unix))
56593+ return 1;
56594+
56595+ if (likely(!proc_is_chrooted(current)))
56596+ return 1;
56597+
df50ba0c 56598+ rcu_read_lock();
58c5fc13 56599+ read_lock(&tasklist_lock);
15a11c5b 56600+ p = find_task_by_vpid_unrestricted(pid);
71d190be 56601+ if (unlikely(p && !have_same_root(current, p))) {
56602+ read_unlock(&tasklist_lock);
56603+ rcu_read_unlock();
56604+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56605+ return 0;
56606+ }
56607+ read_unlock(&tasklist_lock);
df50ba0c 56608+ rcu_read_unlock();
56609+#endif
56610+ return 1;
56611+}
56612+
56613+int
56614+gr_handle_chroot_nice(void)
56615+{
56616+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56617+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56618+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56619+ return -EPERM;
56620+ }
56621+#endif
56622+ return 0;
56623+}
56624+
56625+int
56626+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56627+{
56628+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56629+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56630+ && proc_is_chrooted(current)) {
56631+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56632+ return -EACCES;
56633+ }
56634+#endif
56635+ return 0;
56636+}
56637+
56638+int
56639+gr_handle_chroot_rawio(const struct inode *inode)
56640+{
56641+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56642+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56643+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56644+ return 1;
56645+#endif
56646+ return 0;
56647+}
56648+
56649+int
56650+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56651+{
56652+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56653+ struct task_struct *p;
56654+ int ret = 0;
56655+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56656+ return ret;
56657+
56658+ read_lock(&tasklist_lock);
56659+ do_each_pid_task(pid, type, p) {
56660+ if (!have_same_root(current, p)) {
56661+ ret = 1;
56662+ goto out;
56663+ }
56664+ } while_each_pid_task(pid, type, p);
56665+out:
56666+ read_unlock(&tasklist_lock);
56667+ return ret;
56668+#endif
56669+ return 0;
56670+}
56671+
56672+int
56673+gr_pid_is_chrooted(struct task_struct *p)
56674+{
56675+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56676+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56677+ return 0;
56678+
56679+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56680+ !have_same_root(current, p)) {
56681+ return 1;
56682+ }
56683+#endif
56684+ return 0;
56685+}
56686+
56687+EXPORT_SYMBOL(gr_pid_is_chrooted);
56688+
56689+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56690+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56691+{
56692+ struct path path, currentroot;
56693+ int ret = 0;
58c5fc13 56694+
56695+ path.dentry = (struct dentry *)u_dentry;
56696+ path.mnt = (struct vfsmount *)u_mnt;
6892158b 56697+ get_fs_root(current->fs, &currentroot);
56698+ if (path_is_under(&path, &currentroot))
56699+ ret = 1;
6892158b 56700+ path_put(&currentroot);
58c5fc13 56701+
56702+ return ret;
56703+}
56704+#endif
56705+
56706+int
56707+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56708+{
56709+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56710+ if (!grsec_enable_chroot_fchdir)
56711+ return 1;
56712+
56713+ if (!proc_is_chrooted(current))
56714+ return 1;
56715+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56716+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56717+ return 0;
56718+ }
56719+#endif
56720+ return 1;
56721+}
56722+
56723+int
56724+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56725+ const time_t shm_createtime)
56726+{
56727+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
15a11c5b 56728+ struct task_struct *p;
56729+ time_t starttime;
56730+
56731+ if (unlikely(!grsec_enable_chroot_shmat))
56732+ return 1;
56733+
56734+ if (likely(!proc_is_chrooted(current)))
56735+ return 1;
56736+
df50ba0c 56737+ rcu_read_lock();
56738+ read_lock(&tasklist_lock);
56739+
15a11c5b 56740+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
58c5fc13 56741+ starttime = p->start_time.tv_sec;
56742+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56743+ if (have_same_root(current, p)) {
56744+ goto allow;
56745+ } else {
58c5fc13 56746+ read_unlock(&tasklist_lock);
df50ba0c 56747+ rcu_read_unlock();
56748+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56749+ return 0;
56750+ }
58c5fc13 56751+ }
15a11c5b 56752+ /* creator exited, pid reuse, fall through to next check */
58c5fc13 56753+ }
56754+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56755+ if (unlikely(!have_same_root(current, p))) {
56756+ read_unlock(&tasklist_lock);
56757+ rcu_read_unlock();
56758+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56759+ return 0;
56760+ }
56761+ }
56762+
56763+allow:
58c5fc13 56764+ read_unlock(&tasklist_lock);
df50ba0c 56765+ rcu_read_unlock();
56766+#endif
56767+ return 1;
56768+}
56769+
56770+void
56771+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56772+{
56773+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56774+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56775+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56776+#endif
56777+ return;
56778+}
56779+
56780+int
56781+gr_handle_chroot_mknod(const struct dentry *dentry,
56782+ const struct vfsmount *mnt, const int mode)
56783+{
56784+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56785+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56786+ proc_is_chrooted(current)) {
56787+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56788+ return -EPERM;
56789+ }
56790+#endif
56791+ return 0;
56792+}
56793+
56794+int
56795+gr_handle_chroot_mount(const struct dentry *dentry,
56796+ const struct vfsmount *mnt, const char *dev_name)
56797+{
56798+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56799+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
15a11c5b 56800+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56801+ return -EPERM;
56802+ }
56803+#endif
56804+ return 0;
56805+}
56806+
56807+int
56808+gr_handle_chroot_pivot(void)
56809+{
56810+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56811+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56812+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56813+ return -EPERM;
56814+ }
56815+#endif
56816+ return 0;
56817+}
56818+
56819+int
56820+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56821+{
56822+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56823+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56824+ !gr_is_outside_chroot(dentry, mnt)) {
56825+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56826+ return -EPERM;
56827+ }
56828+#endif
56829+ return 0;
56830+}
56831+
56832+extern const char *captab_log[];
56833+extern int captab_log_entries;
56834+
58c5fc13 56835+int
15a11c5b 56836+gr_chroot_is_capable(const int cap)
56837+{
56838+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
15a11c5b 56839+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
58c5fc13 56840+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56841+ if (cap_raised(chroot_caps, cap)) {
56842+ const struct cred *creds = current_cred();
56843+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
56844+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
56845+ }
56846+ return 0;
56847+ }
56848+ }
56849+#endif
56850+ return 1;
56851+}
58c5fc13 56852+
56853+int
56854+gr_chroot_is_capable_nolog(const int cap)
56855+{
56856+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56857+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56858+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56859+ if (cap_raised(chroot_caps, cap)) {
56860+ return 0;
56861+ }
56862+ }
56863+#endif
15a11c5b 56864+ return 1;
56865+}
56866+
56867+int
56868+gr_handle_chroot_sysctl(const int op)
56869+{
56870+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56871+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56872+ proc_is_chrooted(current))
56873+ return -EACCES;
56874+#endif
56875+ return 0;
56876+}
56877+
56878+void
56879+gr_handle_chroot_chdir(struct path *path)
56880+{
56881+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56882+ if (grsec_enable_chroot_chdir)
56883+ set_fs_pwd(current->fs, path);
56884+#endif
56885+ return;
56886+}
56887+
56888+int
56889+gr_handle_chroot_chmod(const struct dentry *dentry,
56890+ const struct vfsmount *mnt, const int mode)
56891+{
56892+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56893+ /* allow chmod +s on directories, but not files */
56894+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56895+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56896+ proc_is_chrooted(current)) {
56897+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56898+ return -EPERM;
56899+ }
56900+#endif
56901+ return 0;
56902+}
56903diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56904new file mode 100644
56905index 0000000..d81a586
56906--- /dev/null
56907+++ b/grsecurity/grsec_disabled.c
6e9df6a3 56908@@ -0,0 +1,439 @@
56909+#include <linux/kernel.h>
56910+#include <linux/module.h>
56911+#include <linux/sched.h>
56912+#include <linux/file.h>
56913+#include <linux/fs.h>
56914+#include <linux/kdev_t.h>
56915+#include <linux/net.h>
56916+#include <linux/in.h>
56917+#include <linux/ip.h>
56918+#include <linux/skbuff.h>
56919+#include <linux/sysctl.h>
56920+
56921+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56922+void
56923+pax_set_initial_flags(struct linux_binprm *bprm)
56924+{
56925+ return;
56926+}
56927+#endif
56928+
56929+#ifdef CONFIG_SYSCTL
56930+__u32
56931+gr_handle_sysctl(const struct ctl_table * table, const int op)
56932+{
56933+ return 0;
56934+}
56935+#endif
56936+
56937+#ifdef CONFIG_TASKSTATS
56938+int gr_is_taskstats_denied(int pid)
56939+{
56940+ return 0;
56941+}
56942+#endif
56943+
56944+int
56945+gr_acl_is_enabled(void)
56946+{
56947+ return 0;
56948+}
56949+
56950+void
56951+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56952+{
56953+ return;
56954+}
56955+
56956+int
56957+gr_handle_rawio(const struct inode *inode)
56958+{
56959+ return 0;
56960+}
56961+
56962+void
56963+gr_acl_handle_psacct(struct task_struct *task, const long code)
56964+{
56965+ return;
56966+}
56967+
56968+int
56969+gr_handle_ptrace(struct task_struct *task, const long request)
56970+{
56971+ return 0;
56972+}
56973+
56974+int
56975+gr_handle_proc_ptrace(struct task_struct *task)
56976+{
56977+ return 0;
56978+}
56979+
56980+void
56981+gr_learn_resource(const struct task_struct *task,
56982+ const int res, const unsigned long wanted, const int gt)
56983+{
56984+ return;
56985+}
56986+
56987+int
56988+gr_set_acls(const int type)
56989+{
56990+ return 0;
56991+}
56992+
56993+int
56994+gr_check_hidden_task(const struct task_struct *tsk)
56995+{
56996+ return 0;
56997+}
56998+
56999+int
57000+gr_check_protected_task(const struct task_struct *task)
57001+{
57002+ return 0;
57003+}
57004+
57005+int
57006+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57007+{
57008+ return 0;
57009+}
57010+
57011+void
57012+gr_copy_label(struct task_struct *tsk)
57013+{
57014+ return;
57015+}
57016+
57017+void
57018+gr_set_pax_flags(struct task_struct *task)
57019+{
57020+ return;
57021+}
57022+
57023+int
57024+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57025+ const int unsafe_share)
57026+{
57027+ return 0;
57028+}
57029+
57030+void
57031+gr_handle_delete(const ino_t ino, const dev_t dev)
57032+{
57033+ return;
57034+}
57035+
57036+void
57037+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57038+{
57039+ return;
57040+}
57041+
57042+void
57043+gr_handle_crash(struct task_struct *task, const int sig)
57044+{
57045+ return;
57046+}
57047+
57048+int
57049+gr_check_crash_exec(const struct file *filp)
57050+{
57051+ return 0;
57052+}
57053+
57054+int
57055+gr_check_crash_uid(const uid_t uid)
57056+{
57057+ return 0;
57058+}
57059+
57060+void
57061+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57062+ struct dentry *old_dentry,
57063+ struct dentry *new_dentry,
57064+ struct vfsmount *mnt, const __u8 replace)
57065+{
57066+ return;
57067+}
57068+
57069+int
57070+gr_search_socket(const int family, const int type, const int protocol)
57071+{
57072+ return 1;
57073+}
57074+
57075+int
57076+gr_search_connectbind(const int mode, const struct socket *sock,
57077+ const struct sockaddr_in *addr)
57078+{
57079+ return 0;
57080+}
57081+
57082+void
57083+gr_handle_alertkill(struct task_struct *task)
57084+{
57085+ return;
57086+}
57087+
57088+__u32
57089+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57090+{
57091+ return 1;
57092+}
57093+
57094+__u32
57095+gr_acl_handle_hidden_file(const struct dentry * dentry,
57096+ const struct vfsmount * mnt)
57097+{
57098+ return 1;
57099+}
57100+
57101+__u32
57102+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
6e9df6a3 57103+ int acc_mode)
57104+{
57105+ return 1;
57106+}
57107+
57108+__u32
57109+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57110+{
57111+ return 1;
57112+}
57113+
57114+__u32
57115+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57116+{
57117+ return 1;
57118+}
57119+
57120+int
57121+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57122+ unsigned int *vm_flags)
57123+{
57124+ return 1;
57125+}
57126+
57127+__u32
57128+gr_acl_handle_truncate(const struct dentry * dentry,
57129+ const struct vfsmount * mnt)
57130+{
57131+ return 1;
57132+}
57133+
57134+__u32
57135+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57136+{
57137+ return 1;
57138+}
57139+
57140+__u32
57141+gr_acl_handle_access(const struct dentry * dentry,
57142+ const struct vfsmount * mnt, const int fmode)
57143+{
57144+ return 1;
57145+}
57146+
57147+__u32
57148+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
57149+ mode_t mode)
57150+{
57151+ return 1;
57152+}
57153+
57154+__u32
57155+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57156+ mode_t mode)
57157+{
57158+ return 1;
57159+}
57160+
57161+__u32
57162+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57163+{
57164+ return 1;
57165+}
57166+
57167+__u32
57168+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57169+{
57170+ return 1;
57171+}
57172+
57173+void
57174+grsecurity_init(void)
57175+{
57176+ return;
57177+}
57178+
57179+__u32
57180+gr_acl_handle_mknod(const struct dentry * new_dentry,
57181+ const struct dentry * parent_dentry,
57182+ const struct vfsmount * parent_mnt,
57183+ const int mode)
57184+{
57185+ return 1;
57186+}
57187+
57188+__u32
57189+gr_acl_handle_mkdir(const struct dentry * new_dentry,
57190+ const struct dentry * parent_dentry,
57191+ const struct vfsmount * parent_mnt)
57192+{
57193+ return 1;
57194+}
57195+
57196+__u32
57197+gr_acl_handle_symlink(const struct dentry * new_dentry,
57198+ const struct dentry * parent_dentry,
57199+ const struct vfsmount * parent_mnt, const char *from)
57200+{
57201+ return 1;
57202+}
57203+
57204+__u32
57205+gr_acl_handle_link(const struct dentry * new_dentry,
57206+ const struct dentry * parent_dentry,
57207+ const struct vfsmount * parent_mnt,
57208+ const struct dentry * old_dentry,
57209+ const struct vfsmount * old_mnt, const char *to)
57210+{
57211+ return 1;
57212+}
57213+
57214+int
57215+gr_acl_handle_rename(const struct dentry *new_dentry,
57216+ const struct dentry *parent_dentry,
57217+ const struct vfsmount *parent_mnt,
57218+ const struct dentry *old_dentry,
57219+ const struct inode *old_parent_inode,
57220+ const struct vfsmount *old_mnt, const char *newname)
57221+{
57222+ return 0;
57223+}
57224+
57225+int
57226+gr_acl_handle_filldir(const struct file *file, const char *name,
57227+ const int namelen, const ino_t ino)
57228+{
57229+ return 1;
57230+}
57231+
57232+int
57233+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57234+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57235+{
57236+ return 1;
57237+}
57238+
57239+int
57240+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57241+{
57242+ return 0;
57243+}
57244+
57245+int
57246+gr_search_accept(const struct socket *sock)
57247+{
57248+ return 0;
57249+}
57250+
57251+int
57252+gr_search_listen(const struct socket *sock)
57253+{
57254+ return 0;
57255+}
57256+
57257+int
57258+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57259+{
57260+ return 0;
57261+}
57262+
57263+__u32
57264+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57265+{
57266+ return 1;
57267+}
57268+
57269+__u32
57270+gr_acl_handle_creat(const struct dentry * dentry,
57271+ const struct dentry * p_dentry,
6e9df6a3 57272+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57273+ const int imode)
57274+{
57275+ return 1;
57276+}
57277+
57278+void
57279+gr_acl_handle_exit(void)
57280+{
57281+ return;
57282+}
57283+
57284+int
57285+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57286+{
57287+ return 1;
57288+}
57289+
57290+void
57291+gr_set_role_label(const uid_t uid, const gid_t gid)
57292+{
57293+ return;
57294+}
57295+
57296+int
57297+gr_acl_handle_procpidmem(const struct task_struct *task)
57298+{
57299+ return 0;
57300+}
57301+
57302+int
57303+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57304+{
57305+ return 0;
57306+}
57307+
57308+int
57309+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57310+{
57311+ return 0;
57312+}
57313+
57314+void
57315+gr_set_kernel_label(struct task_struct *task)
57316+{
57317+ return;
57318+}
57319+
57320+int
57321+gr_check_user_change(int real, int effective, int fs)
57322+{
57323+ return 0;
57324+}
57325+
57326+int
57327+gr_check_group_change(int real, int effective, int fs)
57328+{
57329+ return 0;
57330+}
57331+
57332+int gr_acl_enable_at_secure(void)
57333+{
57334+ return 0;
57335+}
57336+
57337+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57338+{
57339+ return dentry->d_inode->i_sb->s_dev;
57340+}
57341+
57342+EXPORT_SYMBOL(gr_learn_resource);
57343+EXPORT_SYMBOL(gr_set_kernel_label);
57344+#ifdef CONFIG_SECURITY
57345+EXPORT_SYMBOL(gr_check_user_change);
57346+EXPORT_SYMBOL(gr_check_group_change);
57347+#endif
57348diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57349new file mode 100644
57350index 0000000..2b05ada
57351--- /dev/null
57352+++ b/grsecurity/grsec_exec.c
6e9df6a3 57353@@ -0,0 +1,146 @@
57354+#include <linux/kernel.h>
57355+#include <linux/sched.h>
57356+#include <linux/file.h>
57357+#include <linux/binfmts.h>
57358+#include <linux/fs.h>
57359+#include <linux/types.h>
57360+#include <linux/grdefs.h>
15a11c5b 57361+#include <linux/grsecurity.h>
57362+#include <linux/grinternal.h>
57363+#include <linux/capability.h>
15a11c5b 57364+#include <linux/module.h>
57365+
57366+#include <asm/uaccess.h>
57367+
57368+#ifdef CONFIG_GRKERNSEC_EXECLOG
57369+static char gr_exec_arg_buf[132];
bc901d79 57370+static DEFINE_MUTEX(gr_exec_arg_mutex);
57371+#endif
57372+
15a11c5b 57373+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57374+
57375+void
15a11c5b 57376+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57377+{
57378+#ifdef CONFIG_GRKERNSEC_EXECLOG
57379+ char *grarg = gr_exec_arg_buf;
57380+ unsigned int i, x, execlen = 0;
57381+ char c;
57382+
57383+ if (!((grsec_enable_execlog && grsec_enable_group &&
57384+ in_group_p(grsec_audit_gid))
57385+ || (grsec_enable_execlog && !grsec_enable_group)))
57386+ return;
57387+
bc901d79 57388+ mutex_lock(&gr_exec_arg_mutex);
57389+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
57390+
57391+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
57392+ const char __user *p;
57393+ unsigned int len;
57394+
57395+ p = get_user_arg_ptr(argv, i);
57396+ if (IS_ERR(p))
58c5fc13 57397+ goto log;
15a11c5b 57398+
57399+ len = strnlen_user(p, 128 - execlen);
57400+ if (len > 128 - execlen)
57401+ len = 128 - execlen;
57402+ else if (len > 0)
57403+ len--;
57404+ if (copy_from_user(grarg + execlen, p, len))
57405+ goto log;
57406+
57407+ /* rewrite unprintable characters */
57408+ for (x = 0; x < len; x++) {
57409+ c = *(grarg + execlen + x);
57410+ if (c < 32 || c > 126)
57411+ *(grarg + execlen + x) = ' ';
57412+ }
57413+
57414+ execlen += len;
57415+ *(grarg + execlen) = ' ';
57416+ *(grarg + execlen + 1) = '\0';
57417+ execlen++;
57418+ }
57419+
57420+ log:
57421+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57422+ bprm->file->f_path.mnt, grarg);
bc901d79 57423+ mutex_unlock(&gr_exec_arg_mutex);
57424+#endif
57425+ return;
57426+}
bc901d79 57427+
57428+#ifdef CONFIG_GRKERNSEC
57429+extern int gr_acl_is_capable(const int cap);
57430+extern int gr_acl_is_capable_nolog(const int cap);
57431+extern int gr_chroot_is_capable(const int cap);
57432+extern int gr_chroot_is_capable_nolog(const int cap);
57433+#endif
bc901d79 57434+
57435+const char *captab_log[] = {
57436+ "CAP_CHOWN",
57437+ "CAP_DAC_OVERRIDE",
57438+ "CAP_DAC_READ_SEARCH",
57439+ "CAP_FOWNER",
57440+ "CAP_FSETID",
57441+ "CAP_KILL",
57442+ "CAP_SETGID",
57443+ "CAP_SETUID",
57444+ "CAP_SETPCAP",
57445+ "CAP_LINUX_IMMUTABLE",
57446+ "CAP_NET_BIND_SERVICE",
57447+ "CAP_NET_BROADCAST",
57448+ "CAP_NET_ADMIN",
57449+ "CAP_NET_RAW",
57450+ "CAP_IPC_LOCK",
57451+ "CAP_IPC_OWNER",
57452+ "CAP_SYS_MODULE",
57453+ "CAP_SYS_RAWIO",
57454+ "CAP_SYS_CHROOT",
57455+ "CAP_SYS_PTRACE",
57456+ "CAP_SYS_PACCT",
57457+ "CAP_SYS_ADMIN",
57458+ "CAP_SYS_BOOT",
57459+ "CAP_SYS_NICE",
57460+ "CAP_SYS_RESOURCE",
57461+ "CAP_SYS_TIME",
57462+ "CAP_SYS_TTY_CONFIG",
57463+ "CAP_MKNOD",
57464+ "CAP_LEASE",
57465+ "CAP_AUDIT_WRITE",
57466+ "CAP_AUDIT_CONTROL",
57467+ "CAP_SETFCAP",
57468+ "CAP_MAC_OVERRIDE",
57469+ "CAP_MAC_ADMIN",
57470+ "CAP_SYSLOG",
57471+ "CAP_WAKE_ALARM"
15a11c5b 57472+};
bc901d79 57473+
15a11c5b 57474+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
bc901d79 57475+
57476+int gr_is_capable(const int cap)
57477+{
57478+#ifdef CONFIG_GRKERNSEC
57479+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57480+ return 1;
57481+ return 0;
57482+#else
57483+ return 1;
bc901d79 57484+#endif
bc901d79 57485+}
57486+
57487+int gr_is_capable_nolog(const int cap)
57488+{
57489+#ifdef CONFIG_GRKERNSEC
57490+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57491+ return 1;
57492+ return 0;
57493+#else
57494+ return 1;
bc901d79 57495+#endif
57496+}
57497+
57498+EXPORT_SYMBOL(gr_is_capable);
57499+EXPORT_SYMBOL(gr_is_capable_nolog);
57500diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57501new file mode 100644
57502index 0000000..d3ee748
57503--- /dev/null
57504+++ b/grsecurity/grsec_fifo.c
57505@@ -0,0 +1,24 @@
57506+#include <linux/kernel.h>
57507+#include <linux/sched.h>
57508+#include <linux/fs.h>
57509+#include <linux/file.h>
57510+#include <linux/grinternal.h>
57511+
57512+int
57513+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57514+ const struct dentry *dir, const int flag, const int acc_mode)
57515+{
57516+#ifdef CONFIG_GRKERNSEC_FIFO
57517+ const struct cred *cred = current_cred();
57518+
57519+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57520+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57521+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57522+ (cred->fsuid != dentry->d_inode->i_uid)) {
16454cff 57523+ if (!inode_permission(dentry->d_inode, acc_mode))
57524+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57525+ return -EACCES;
57526+ }
57527+#endif
57528+ return 0;
57529+}
57530diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57531new file mode 100644
57532index 0000000..8ca18bf
57533--- /dev/null
57534+++ b/grsecurity/grsec_fork.c
6892158b 57535@@ -0,0 +1,23 @@
57536+#include <linux/kernel.h>
57537+#include <linux/sched.h>
57538+#include <linux/grsecurity.h>
57539+#include <linux/grinternal.h>
57540+#include <linux/errno.h>
57541+
57542+void
57543+gr_log_forkfail(const int retval)
57544+{
57545+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57546+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57547+ switch (retval) {
57548+ case -EAGAIN:
57549+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57550+ break;
57551+ case -ENOMEM:
57552+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57553+ break;
57554+ }
57555+ }
57556+#endif
57557+ return;
57558+}
57559diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57560new file mode 100644
57561index 0000000..356ef00
57562--- /dev/null
57563+++ b/grsecurity/grsec_init.c
66a7e928 57564@@ -0,0 +1,269 @@
57565+#include <linux/kernel.h>
57566+#include <linux/sched.h>
57567+#include <linux/mm.h>
57568+#include <linux/gracl.h>
57569+#include <linux/slab.h>
57570+#include <linux/vmalloc.h>
57571+#include <linux/percpu.h>
df50ba0c 57572+#include <linux/module.h>
58c5fc13 57573+
15a11c5b 57574+int grsec_enable_brute;
57575+int grsec_enable_link;
57576+int grsec_enable_dmesg;
57577+int grsec_enable_harden_ptrace;
57578+int grsec_enable_fifo;
57579+int grsec_enable_execlog;
57580+int grsec_enable_signal;
57581+int grsec_enable_forkfail;
ae4e228f 57582+int grsec_enable_audit_ptrace;
57583+int grsec_enable_time;
57584+int grsec_enable_audit_textrel;
57585+int grsec_enable_group;
57586+int grsec_audit_gid;
57587+int grsec_enable_chdir;
57588+int grsec_enable_mount;
ae4e228f 57589+int grsec_enable_rofs;
57590+int grsec_enable_chroot_findtask;
57591+int grsec_enable_chroot_mount;
57592+int grsec_enable_chroot_shmat;
57593+int grsec_enable_chroot_fchdir;
57594+int grsec_enable_chroot_double;
57595+int grsec_enable_chroot_pivot;
57596+int grsec_enable_chroot_chdir;
57597+int grsec_enable_chroot_chmod;
57598+int grsec_enable_chroot_mknod;
57599+int grsec_enable_chroot_nice;
57600+int grsec_enable_chroot_execlog;
57601+int grsec_enable_chroot_caps;
57602+int grsec_enable_chroot_sysctl;
57603+int grsec_enable_chroot_unix;
57604+int grsec_enable_tpe;
57605+int grsec_tpe_gid;
ae4e228f 57606+int grsec_enable_blackhole;
57607+#ifdef CONFIG_IPV6_MODULE
57608+EXPORT_SYMBOL(grsec_enable_blackhole);
57609+#endif
ae4e228f 57610+int grsec_lastack_retries;
58c5fc13 57611+int grsec_enable_tpe_all;
57199397 57612+int grsec_enable_tpe_invert;
57613+int grsec_enable_socket_all;
57614+int grsec_socket_all_gid;
57615+int grsec_enable_socket_client;
57616+int grsec_socket_client_gid;
57617+int grsec_enable_socket_server;
57618+int grsec_socket_server_gid;
57619+int grsec_resource_logging;
df50ba0c 57620+int grsec_disable_privio;
6892158b 57621+int grsec_enable_log_rwxmaps;
57622+int grsec_lock;
57623+
57624+DEFINE_SPINLOCK(grsec_alert_lock);
57625+unsigned long grsec_alert_wtime = 0;
57626+unsigned long grsec_alert_fyet = 0;
57627+
57628+DEFINE_SPINLOCK(grsec_audit_lock);
57629+
57630+DEFINE_RWLOCK(grsec_exec_file_lock);
57631+
57632+char *gr_shared_page[4];
57633+
57634+char *gr_alert_log_fmt;
57635+char *gr_audit_log_fmt;
57636+char *gr_alert_log_buf;
57637+char *gr_audit_log_buf;
57638+
57639+extern struct gr_arg *gr_usermode;
57640+extern unsigned char *gr_system_salt;
57641+extern unsigned char *gr_system_sum;
57642+
57643+void __init
57644+grsecurity_init(void)
57645+{
57646+ int j;
57647+ /* create the per-cpu shared pages */
57648+
57649+#ifdef CONFIG_X86
57650+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57651+#endif
57652+
57653+ for (j = 0; j < 4; j++) {
57654+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57655+ if (gr_shared_page[j] == NULL) {
57656+ panic("Unable to allocate grsecurity shared page");
57657+ return;
57658+ }
57659+ }
57660+
57661+ /* allocate log buffers */
57662+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57663+ if (!gr_alert_log_fmt) {
57664+ panic("Unable to allocate grsecurity alert log format buffer");
57665+ return;
57666+ }
57667+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57668+ if (!gr_audit_log_fmt) {
57669+ panic("Unable to allocate grsecurity audit log format buffer");
57670+ return;
57671+ }
57672+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57673+ if (!gr_alert_log_buf) {
57674+ panic("Unable to allocate grsecurity alert log buffer");
57675+ return;
57676+ }
57677+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57678+ if (!gr_audit_log_buf) {
57679+ panic("Unable to allocate grsecurity audit log buffer");
57680+ return;
57681+ }
57682+
57683+ /* allocate memory for authentication structure */
57684+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57685+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57686+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57687+
57688+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57689+ panic("Unable to allocate grsecurity authentication structure");
57690+ return;
57691+ }
57692+
57693+
57694+#ifdef CONFIG_GRKERNSEC_IO
57695+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57696+ grsec_disable_privio = 1;
57697+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57698+ grsec_disable_privio = 1;
57699+#else
57700+ grsec_disable_privio = 0;
57701+#endif
57702+#endif
57703+
57704+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57705+ /* for backward compatibility, tpe_invert always defaults to on if
57706+ enabled in the kernel
57707+ */
57708+ grsec_enable_tpe_invert = 1;
57709+#endif
57710+
57711+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57712+#ifndef CONFIG_GRKERNSEC_SYSCTL
57713+ grsec_lock = 1;
57714+#endif
df50ba0c 57715+
57716+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57717+ grsec_enable_audit_textrel = 1;
57718+#endif
57719+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57720+ grsec_enable_log_rwxmaps = 1;
57721+#endif
57722+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57723+ grsec_enable_group = 1;
57724+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57725+#endif
57726+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57727+ grsec_enable_chdir = 1;
57728+#endif
57729+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57730+ grsec_enable_harden_ptrace = 1;
57731+#endif
57732+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57733+ grsec_enable_mount = 1;
57734+#endif
57735+#ifdef CONFIG_GRKERNSEC_LINK
57736+ grsec_enable_link = 1;
57737+#endif
57738+#ifdef CONFIG_GRKERNSEC_BRUTE
57739+ grsec_enable_brute = 1;
57740+#endif
57741+#ifdef CONFIG_GRKERNSEC_DMESG
57742+ grsec_enable_dmesg = 1;
57743+#endif
57744+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57745+ grsec_enable_blackhole = 1;
57746+ grsec_lastack_retries = 4;
57747+#endif
57748+#ifdef CONFIG_GRKERNSEC_FIFO
57749+ grsec_enable_fifo = 1;
57750+#endif
57751+#ifdef CONFIG_GRKERNSEC_EXECLOG
57752+ grsec_enable_execlog = 1;
57753+#endif
57754+#ifdef CONFIG_GRKERNSEC_SIGNAL
57755+ grsec_enable_signal = 1;
57756+#endif
57757+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57758+ grsec_enable_forkfail = 1;
57759+#endif
57760+#ifdef CONFIG_GRKERNSEC_TIME
57761+ grsec_enable_time = 1;
57762+#endif
57763+#ifdef CONFIG_GRKERNSEC_RESLOG
57764+ grsec_resource_logging = 1;
57765+#endif
57766+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57767+ grsec_enable_chroot_findtask = 1;
57768+#endif
57769+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57770+ grsec_enable_chroot_unix = 1;
57771+#endif
57772+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57773+ grsec_enable_chroot_mount = 1;
57774+#endif
57775+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57776+ grsec_enable_chroot_fchdir = 1;
57777+#endif
57778+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57779+ grsec_enable_chroot_shmat = 1;
57780+#endif
57781+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57782+ grsec_enable_audit_ptrace = 1;
57783+#endif
57784+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57785+ grsec_enable_chroot_double = 1;
57786+#endif
57787+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57788+ grsec_enable_chroot_pivot = 1;
57789+#endif
57790+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57791+ grsec_enable_chroot_chdir = 1;
57792+#endif
57793+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57794+ grsec_enable_chroot_chmod = 1;
57795+#endif
57796+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57797+ grsec_enable_chroot_mknod = 1;
57798+#endif
57799+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57800+ grsec_enable_chroot_nice = 1;
57801+#endif
57802+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57803+ grsec_enable_chroot_execlog = 1;
57804+#endif
57805+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57806+ grsec_enable_chroot_caps = 1;
57807+#endif
57808+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57809+ grsec_enable_chroot_sysctl = 1;
57810+#endif
57811+#ifdef CONFIG_GRKERNSEC_TPE
57812+ grsec_enable_tpe = 1;
57813+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57814+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57815+ grsec_enable_tpe_all = 1;
57816+#endif
57817+#endif
57818+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57819+ grsec_enable_socket_all = 1;
57820+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57821+#endif
57822+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57823+ grsec_enable_socket_client = 1;
57824+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57825+#endif
57826+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57827+ grsec_enable_socket_server = 1;
57828+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57829+#endif
57830+#endif
57831+
57832+ return;
57833+}
57834diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57835new file mode 100644
57836index 0000000..3efe141
57837--- /dev/null
57838+++ b/grsecurity/grsec_link.c
57839@@ -0,0 +1,43 @@
57840+#include <linux/kernel.h>
57841+#include <linux/sched.h>
57842+#include <linux/fs.h>
57843+#include <linux/file.h>
57844+#include <linux/grinternal.h>
57845+
57846+int
57847+gr_handle_follow_link(const struct inode *parent,
57848+ const struct inode *inode,
57849+ const struct dentry *dentry, const struct vfsmount *mnt)
57850+{
57851+#ifdef CONFIG_GRKERNSEC_LINK
57852+ const struct cred *cred = current_cred();
57853+
57854+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57855+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57856+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57857+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57858+ return -EACCES;
57859+ }
57860+#endif
57861+ return 0;
57862+}
57863+
57864+int
57865+gr_handle_hardlink(const struct dentry *dentry,
57866+ const struct vfsmount *mnt,
57867+ struct inode *inode, const int mode, const char *to)
57868+{
57869+#ifdef CONFIG_GRKERNSEC_LINK
57870+ const struct cred *cred = current_cred();
57871+
57872+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57873+ (!S_ISREG(mode) || (mode & S_ISUID) ||
57874+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
16454cff 57875+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57876+ !capable(CAP_FOWNER) && cred->uid) {
57877+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57878+ return -EPERM;
57879+ }
57880+#endif
57881+ return 0;
57882+}
57883diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57884new file mode 100644
57885index 0000000..a45d2e9
57886--- /dev/null
57887+++ b/grsecurity/grsec_log.c
6e9df6a3 57888@@ -0,0 +1,322 @@
57889+#include <linux/kernel.h>
57890+#include <linux/sched.h>
57891+#include <linux/file.h>
57892+#include <linux/tty.h>
57893+#include <linux/fs.h>
57894+#include <linux/grinternal.h>
57895+
57896+#ifdef CONFIG_TREE_PREEMPT_RCU
57897+#define DISABLE_PREEMPT() preempt_disable()
57898+#define ENABLE_PREEMPT() preempt_enable()
57899+#else
57900+#define DISABLE_PREEMPT()
57901+#define ENABLE_PREEMPT()
57902+#endif
57903+
58c5fc13 57904+#define BEGIN_LOCKS(x) \
df50ba0c 57905+ DISABLE_PREEMPT(); \
ae4e228f 57906+ rcu_read_lock(); \
57907+ read_lock(&tasklist_lock); \
57908+ read_lock(&grsec_exec_file_lock); \
57909+ if (x != GR_DO_AUDIT) \
57910+ spin_lock(&grsec_alert_lock); \
57911+ else \
57912+ spin_lock(&grsec_audit_lock)
57913+
57914+#define END_LOCKS(x) \
57915+ if (x != GR_DO_AUDIT) \
57916+ spin_unlock(&grsec_alert_lock); \
57917+ else \
57918+ spin_unlock(&grsec_audit_lock); \
57919+ read_unlock(&grsec_exec_file_lock); \
57920+ read_unlock(&tasklist_lock); \
ae4e228f 57921+ rcu_read_unlock(); \
df50ba0c 57922+ ENABLE_PREEMPT(); \
57923+ if (x == GR_DONT_AUDIT) \
57924+ gr_handle_alertkill(current)
57925+
57926+enum {
57927+ FLOODING,
57928+ NO_FLOODING
57929+};
57930+
57931+extern char *gr_alert_log_fmt;
57932+extern char *gr_audit_log_fmt;
57933+extern char *gr_alert_log_buf;
57934+extern char *gr_audit_log_buf;
57935+
57936+static int gr_log_start(int audit)
57937+{
57938+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57939+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57940+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57941+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57942+ unsigned long curr_secs = get_seconds();
57943+
57944+ if (audit == GR_DO_AUDIT)
57945+ goto set_fmt;
57946+
57947+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57948+ grsec_alert_wtime = curr_secs;
58c5fc13 57949+ grsec_alert_fyet = 0;
57950+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57951+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57952+ grsec_alert_fyet++;
57953+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
15a11c5b 57954+ grsec_alert_wtime = curr_secs;
58c5fc13
MT
57955+ grsec_alert_fyet++;
57956+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57957+ return FLOODING;
15a11c5b
MT
57958+ }
57959+ else return FLOODING;
58c5fc13
MT
57960+
57961+set_fmt:
15a11c5b 57962+#endif
58c5fc13
MT
57963+ memset(buf, 0, PAGE_SIZE);
57964+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
ae4e228f
MT
57965+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57966+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58c5fc13 57967+ } else if (current->signal->curr_ip) {
ae4e228f
MT
57968+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57969+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58c5fc13
MT
57970+ } else if (gr_acl_is_enabled()) {
57971+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57972+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57973+ } else {
57974+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
57975+ strcpy(buf, fmt);
57976+ }
57977+
57978+ return NO_FLOODING;
57979+}
57980+
57981+static void gr_log_middle(int audit, const char *msg, va_list ap)
57982+ __attribute__ ((format (printf, 2, 0)));
57983+
57984+static void gr_log_middle(int audit, const char *msg, va_list ap)
57985+{
57986+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57987+ unsigned int len = strlen(buf);
57988+
57989+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57990+
57991+ return;
57992+}
57993+
57994+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57995+ __attribute__ ((format (printf, 2, 3)));
57996+
57997+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57998+{
57999+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58000+ unsigned int len = strlen(buf);
58001+ va_list ap;
58002+
58003+ va_start(ap, msg);
58004+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58005+ va_end(ap);
58006+
58007+ return;
58008+}
58009+
6e9df6a3 58010+static void gr_log_end(int audit, int append_default)
58c5fc13
MT
58011+{
58012+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58c5fc13 58013+
6e9df6a3
MT
58014+ if (append_default) {
58015+ unsigned int len = strlen(buf);
58016+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58017+ }
58018+
58c5fc13
MT
58019+ printk("%s\n", buf);
58020+
58021+ return;
58022+}
58023+
58024+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58025+{
58026+ int logtype;
58027+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
66a7e928
MT
58028+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58029+ void *voidptr = NULL;
58030+ int num1 = 0, num2 = 0;
58031+ unsigned long ulong1 = 0, ulong2 = 0;
58032+ struct dentry *dentry = NULL;
58033+ struct vfsmount *mnt = NULL;
58034+ struct file *file = NULL;
58035+ struct task_struct *task = NULL;
58c5fc13
MT
58036+ const struct cred *cred, *pcred;
58037+ va_list ap;
58038+
58039+ BEGIN_LOCKS(audit);
58040+ logtype = gr_log_start(audit);
58041+ if (logtype == FLOODING) {
58042+ END_LOCKS(audit);
58043+ return;
58044+ }
58045+ va_start(ap, argtypes);
58046+ switch (argtypes) {
58047+ case GR_TTYSNIFF:
58048+ task = va_arg(ap, struct task_struct *);
6892158b 58049+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58c5fc13
MT
58050+ break;
58051+ case GR_SYSCTL_HIDDEN:
58052+ str1 = va_arg(ap, char *);
58053+ gr_log_middle_varargs(audit, msg, result, str1);
58054+ break;
58055+ case GR_RBAC:
58056+ dentry = va_arg(ap, struct dentry *);
58057+ mnt = va_arg(ap, struct vfsmount *);
58058+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58059+ break;
58060+ case GR_RBAC_STR:
58061+ dentry = va_arg(ap, struct dentry *);
58062+ mnt = va_arg(ap, struct vfsmount *);
58063+ str1 = va_arg(ap, char *);
58064+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58065+ break;
58066+ case GR_STR_RBAC:
58067+ str1 = va_arg(ap, char *);
58068+ dentry = va_arg(ap, struct dentry *);
58069+ mnt = va_arg(ap, struct vfsmount *);
58070+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58071+ break;
58072+ case GR_RBAC_MODE2:
58073+ dentry = va_arg(ap, struct dentry *);
58074+ mnt = va_arg(ap, struct vfsmount *);
58075+ str1 = va_arg(ap, char *);
58076+ str2 = va_arg(ap, char *);
58077+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58078+ break;
58079+ case GR_RBAC_MODE3:
58080+ dentry = va_arg(ap, struct dentry *);
58081+ mnt = va_arg(ap, struct vfsmount *);
58082+ str1 = va_arg(ap, char *);
58083+ str2 = va_arg(ap, char *);
58084+ str3 = va_arg(ap, char *);
58085+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58086+ break;
58087+ case GR_FILENAME:
58088+ dentry = va_arg(ap, struct dentry *);
58089+ mnt = va_arg(ap, struct vfsmount *);
58090+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58091+ break;
58092+ case GR_STR_FILENAME:
58093+ str1 = va_arg(ap, char *);
58094+ dentry = va_arg(ap, struct dentry *);
58095+ mnt = va_arg(ap, struct vfsmount *);
58096+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58097+ break;
58098+ case GR_FILENAME_STR:
58099+ dentry = va_arg(ap, struct dentry *);
58100+ mnt = va_arg(ap, struct vfsmount *);
58101+ str1 = va_arg(ap, char *);
58102+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58103+ break;
58104+ case GR_FILENAME_TWO_INT:
58105+ dentry = va_arg(ap, struct dentry *);
58106+ mnt = va_arg(ap, struct vfsmount *);
58107+ num1 = va_arg(ap, int);
58108+ num2 = va_arg(ap, int);
58109+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58110+ break;
58111+ case GR_FILENAME_TWO_INT_STR:
58112+ dentry = va_arg(ap, struct dentry *);
58113+ mnt = va_arg(ap, struct vfsmount *);
58114+ num1 = va_arg(ap, int);
58115+ num2 = va_arg(ap, int);
58116+ str1 = va_arg(ap, char *);
58117+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58118+ break;
58119+ case GR_TEXTREL:
58120+ file = va_arg(ap, struct file *);
58121+ ulong1 = va_arg(ap, unsigned long);
58122+ ulong2 = va_arg(ap, unsigned long);
58123+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58124+ break;
58125+ case GR_PTRACE:
58126+ task = va_arg(ap, struct task_struct *);
58127+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58128+ break;
58129+ case GR_RESOURCE:
58130+ task = va_arg(ap, struct task_struct *);
58131+ cred = __task_cred(task);
6892158b 58132+ pcred = __task_cred(task->real_parent);
58c5fc13
MT
58133+ ulong1 = va_arg(ap, unsigned long);
58134+ str1 = va_arg(ap, char *);
58135+ ulong2 = va_arg(ap, unsigned long);
6892158b 58136+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58137+ break;
58138+ case GR_CAP:
58139+ task = va_arg(ap, struct task_struct *);
58140+ cred = __task_cred(task);
6892158b 58141+ pcred = __task_cred(task->real_parent);
58c5fc13 58142+ str1 = va_arg(ap, char *);
6892158b 58143+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58144+ break;
58145+ case GR_SIG:
58146+ str1 = va_arg(ap, char *);
58147+ voidptr = va_arg(ap, void *);
58148+ gr_log_middle_varargs(audit, msg, str1, voidptr);
58149+ break;
58150+ case GR_SIG2:
58151+ task = va_arg(ap, struct task_struct *);
58152+ cred = __task_cred(task);
6892158b 58153+ pcred = __task_cred(task->real_parent);
58c5fc13 58154+ num1 = va_arg(ap, int);
6892158b 58155+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58156+ break;
58157+ case GR_CRASH1:
58158+ task = va_arg(ap, struct task_struct *);
58159+ cred = __task_cred(task);
6892158b 58160+ pcred = __task_cred(task->real_parent);
58c5fc13 58161+ ulong1 = va_arg(ap, unsigned long);
6892158b 58162+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58c5fc13
MT
58163+ break;
58164+ case GR_CRASH2:
58165+ task = va_arg(ap, struct task_struct *);
58166+ cred = __task_cred(task);
6892158b 58167+ pcred = __task_cred(task->real_parent);
58c5fc13 58168+ ulong1 = va_arg(ap, unsigned long);
6892158b
MT
58169+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58170+ break;
58171+ case GR_RWXMAP:
58172+ file = va_arg(ap, struct file *);
58173+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58c5fc13
MT
58174+ break;
58175+ case GR_PSACCT:
58176+ {
58177+ unsigned int wday, cday;
58178+ __u8 whr, chr;
58179+ __u8 wmin, cmin;
58180+ __u8 wsec, csec;
58181+ char cur_tty[64] = { 0 };
58182+ char parent_tty[64] = { 0 };
58183+
58184+ task = va_arg(ap, struct task_struct *);
58185+ wday = va_arg(ap, unsigned int);
58186+ cday = va_arg(ap, unsigned int);
58187+ whr = va_arg(ap, int);
58188+ chr = va_arg(ap, int);
58189+ wmin = va_arg(ap, int);
58190+ cmin = va_arg(ap, int);
58191+ wsec = va_arg(ap, int);
58192+ csec = va_arg(ap, int);
58193+ ulong1 = va_arg(ap, unsigned long);
58194+ cred = __task_cred(task);
6892158b 58195+ pcred = __task_cred(task->real_parent);
58c5fc13 58196+
6892158b 58197+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58198+ }
58199+ break;
58200+ default:
58201+ gr_log_middle(audit, msg, ap);
58202+ }
58203+ va_end(ap);
6e9df6a3
MT
58204+ // these don't need DEFAULTSECARGS printed on the end
58205+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58206+ gr_log_end(audit, 0);
58207+ else
58208+ gr_log_end(audit, 1);
58c5fc13
MT
58209+ END_LOCKS(audit);
58210+}
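gr_log_start() above rate-limits alerts to CONFIG_GRKERNSEC_FLOODBURST messages per CONFIG_GRKERNSEC_FLOODTIME-second window; GR_DO_AUDIT messages skip the throttle entirely. A standalone sketch of that window/burst logic, with static locals standing in for the grsec_alert_wtime/grsec_alert_fyet globals (which the kernel protects with grsec_alert_lock) and the timestamp passed in by the caller:

#include <stdbool.h>
#include <stdio.h>

#define FLOODTIME  10	/* seconds per window, like CONFIG_GRKERNSEC_FLOODTIME */
#define FLOODBURST  6	/* alerts allowed per window, like CONFIG_GRKERNSEC_FLOODBURST */

static bool alert_allowed(unsigned long now)
{
	static unsigned long wtime;	/* start of the current window */
	static unsigned int fyet;	/* alerts emitted in this window */

	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;		/* window expired: start a new one */
		fyet = 0;
	} else if (fyet < FLOODBURST) {
		fyet++;			/* still inside the allowed burst */
	} else if (fyet == FLOODBURST) {
		wtime = now;		/* burst exhausted: mute and say so once */
		fyet++;
		printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
		return false;
	} else {
		return false;		/* already muted for this window */
	}
	return true;
}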
fe2de317
MT
58211diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58212new file mode 100644
58213index 0000000..6c0416b
58214--- /dev/null
58215+++ b/grsecurity/grsec_mem.c
71d190be 58216@@ -0,0 +1,33 @@
58c5fc13
MT
58217+#include <linux/kernel.h>
58218+#include <linux/sched.h>
58219+#include <linux/mm.h>
58220+#include <linux/mman.h>
58221+#include <linux/grinternal.h>
58222+
58223+void
58224+gr_handle_ioperm(void)
58225+{
58226+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58227+ return;
58228+}
58229+
58230+void
58231+gr_handle_iopl(void)
58232+{
58233+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58234+ return;
58235+}
58236+
58237+void
71d190be 58238+gr_handle_mem_readwrite(u64 from, u64 to)
58c5fc13 58239+{
71d190be 58240+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58c5fc13
MT
58241+ return;
58242+}
58243+
58244+void
ae4e228f
MT
58245+gr_handle_vm86(void)
58246+{
58247+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58248+ return;
58249+}
fe2de317
MT
58250diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58251new file mode 100644
58252index 0000000..2131422
58253--- /dev/null
58254+++ b/grsecurity/grsec_mount.c
ae4e228f 58255@@ -0,0 +1,62 @@
58c5fc13
MT
58256+#include <linux/kernel.h>
58257+#include <linux/sched.h>
ae4e228f 58258+#include <linux/mount.h>
58c5fc13
MT
58259+#include <linux/grsecurity.h>
58260+#include <linux/grinternal.h>
58261+
58262+void
58263+gr_log_remount(const char *devname, const int retval)
58264+{
58265+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58266+ if (grsec_enable_mount && (retval >= 0))
58267+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58268+#endif
58269+ return;
58270+}
58271+
58272+void
58273+gr_log_unmount(const char *devname, const int retval)
58274+{
58275+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58276+ if (grsec_enable_mount && (retval >= 0))
58277+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58278+#endif
58279+ return;
58280+}
58281+
58282+void
58283+gr_log_mount(const char *from, const char *to, const int retval)
58284+{
58285+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58286+ if (grsec_enable_mount && (retval >= 0))
15a11c5b 58287+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58c5fc13
MT
58288+#endif
58289+ return;
58290+}
ae4e228f
MT
58291+
58292+int
58293+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58294+{
58295+#ifdef CONFIG_GRKERNSEC_ROFS
58296+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58297+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58298+ return -EPERM;
58299+ } else
58300+ return 0;
58301+#endif
58302+ return 0;
58303+}
58304+
58305+int
58306+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58307+{
58308+#ifdef CONFIG_GRKERNSEC_ROFS
58309+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58310+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58311+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58312+ return -EPERM;
58313+ } else
58314+ return 0;
58315+#endif
58316+ return 0;
58317+}
fe2de317
MT
58318diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58319new file mode 100644
58320index 0000000..a3b12a0
58321--- /dev/null
58322+++ b/grsecurity/grsec_pax.c
6892158b
MT
58323@@ -0,0 +1,36 @@
58324+#include <linux/kernel.h>
58325+#include <linux/sched.h>
58326+#include <linux/mm.h>
58327+#include <linux/file.h>
58328+#include <linux/grinternal.h>
58329+#include <linux/grsecurity.h>
58330+
58331+void
58332+gr_log_textrel(struct vm_area_struct * vma)
58333+{
58334+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58335+ if (grsec_enable_audit_textrel)
58336+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58337+#endif
58338+ return;
58339+}
58340+
58341+void
58342+gr_log_rwxmmap(struct file *file)
58343+{
58344+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58345+ if (grsec_enable_log_rwxmaps)
58346+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58347+#endif
58348+ return;
58349+}
58350+
58351+void
58352+gr_log_rwxmprotect(struct file *file)
58353+{
58354+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58355+ if (grsec_enable_log_rwxmaps)
58356+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58357+#endif
58358+ return;
58359+}
fe2de317
MT
58360diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58361new file mode 100644
58362index 0000000..472c1d6
58363--- /dev/null
58364+++ b/grsecurity/grsec_ptrace.c
ae4e228f
MT
58365@@ -0,0 +1,14 @@
58366+#include <linux/kernel.h>
58367+#include <linux/sched.h>
58368+#include <linux/grinternal.h>
58369+#include <linux/grsecurity.h>
58370+
58371+void
58372+gr_audit_ptrace(struct task_struct *task)
58373+{
58374+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58375+ if (grsec_enable_audit_ptrace)
58376+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58377+#endif
58378+ return;
58379+}
fe2de317
MT
58380diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58381new file mode 100644
58382index 0000000..cf090b3
58383--- /dev/null
58384+++ b/grsecurity/grsec_sig.c
15a11c5b 58385@@ -0,0 +1,206 @@
58c5fc13
MT
58386+#include <linux/kernel.h>
58387+#include <linux/sched.h>
58388+#include <linux/delay.h>
58389+#include <linux/grsecurity.h>
58390+#include <linux/grinternal.h>
71d190be 58391+#include <linux/hardirq.h>
58c5fc13
MT
58392+
58393+char *signames[] = {
58394+ [SIGSEGV] = "Segmentation fault",
58395+ [SIGILL] = "Illegal instruction",
58396+ [SIGABRT] = "Abort",
58397+ [SIGBUS] = "Invalid alignment/Bus error"
58398+};
58399+
58400+void
58401+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58402+{
58403+#ifdef CONFIG_GRKERNSEC_SIGNAL
58404+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58405+ (sig == SIGABRT) || (sig == SIGBUS))) {
58406+ if (t->pid == current->pid) {
58407+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58408+ } else {
58409+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58410+ }
58411+ }
58412+#endif
58413+ return;
58414+}
58415+
58416+int
58417+gr_handle_signal(const struct task_struct *p, const int sig)
58418+{
58419+#ifdef CONFIG_GRKERNSEC
58420+ if (current->pid > 1 && gr_check_protected_task(p)) {
58421+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58422+ return -EPERM;
58423+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58424+ return -EPERM;
58425+ }
58426+#endif
58427+ return 0;
58428+}
58429+
71d190be
MT
58430+#ifdef CONFIG_GRKERNSEC
58431+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58432+
58433+int gr_fake_force_sig(int sig, struct task_struct *t)
58434+{
58435+ unsigned long int flags;
58436+ int ret, blocked, ignored;
58437+ struct k_sigaction *action;
58438+
58439+ spin_lock_irqsave(&t->sighand->siglock, flags);
58440+ action = &t->sighand->action[sig-1];
58441+ ignored = action->sa.sa_handler == SIG_IGN;
58442+ blocked = sigismember(&t->blocked, sig);
58443+ if (blocked || ignored) {
58444+ action->sa.sa_handler = SIG_DFL;
58445+ if (blocked) {
58446+ sigdelset(&t->blocked, sig);
58447+ recalc_sigpending_and_wake(t);
58448+ }
58449+ }
58450+ if (action->sa.sa_handler == SIG_DFL)
58451+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
58452+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58453+
58454+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
58455+
58456+ return ret;
58457+}
58458+#endif
58459+
58460+#ifdef CONFIG_GRKERNSEC_BRUTE
58461+#define GR_USER_BAN_TIME (15 * 60)
58462+
58463+static int __get_dumpable(unsigned long mm_flags)
58464+{
58465+ int ret;
58466+
58467+ ret = mm_flags & MMF_DUMPABLE_MASK;
58468+ return (ret >= 2) ? 2 : ret;
58469+}
58470+#endif
58471+
58472+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58c5fc13
MT
58473+{
58474+#ifdef CONFIG_GRKERNSEC_BRUTE
71d190be
MT
58475+ uid_t uid = 0;
58476+
15a11c5b
MT
58477+ if (!grsec_enable_brute)
58478+ return;
58479+
71d190be 58480+ rcu_read_lock();
58c5fc13
MT
58481+ read_lock(&tasklist_lock);
58482+ read_lock(&grsec_exec_file_lock);
6892158b
MT
58483+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58484+ p->real_parent->brute = 1;
71d190be
MT
58485+ else {
58486+ const struct cred *cred = __task_cred(p), *cred2;
58487+ struct task_struct *tsk, *tsk2;
58488+
58489+ if (!__get_dumpable(mm_flags) && cred->uid) {
58490+ struct user_struct *user;
58491+
58492+ uid = cred->uid;
58493+
58494+ /* this is put upon execution past expiration */
58495+ user = find_user(uid);
58496+ if (user == NULL)
58497+ goto unlock;
58498+ user->banned = 1;
58499+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58500+ if (user->ban_expires == ~0UL)
58501+ user->ban_expires--;
58502+
58503+ do_each_thread(tsk2, tsk) {
58504+ cred2 = __task_cred(tsk);
58505+ if (tsk != p && cred2->uid == uid)
58506+ gr_fake_force_sig(SIGKILL, tsk);
58507+ } while_each_thread(tsk2, tsk);
58508+ }
58509+ }
58510+unlock:
58c5fc13
MT
58511+ read_unlock(&grsec_exec_file_lock);
58512+ read_unlock(&tasklist_lock);
71d190be
MT
58513+ rcu_read_unlock();
58514+
58515+ if (uid)
58516+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58517+
58c5fc13
MT
58518+#endif
58519+ return;
58520+}
58521+
58522+void gr_handle_brute_check(void)
58523+{
58524+#ifdef CONFIG_GRKERNSEC_BRUTE
58525+ if (current->brute)
58526+ msleep(30 * 1000);
58527+#endif
58528+ return;
58529+}
58530+
71d190be
MT
58531+void gr_handle_kernel_exploit(void)
58532+{
58533+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58534+ const struct cred *cred;
58535+ struct task_struct *tsk, *tsk2;
58536+ struct user_struct *user;
58537+ uid_t uid;
58538+
58539+ if (in_irq() || in_serving_softirq() || in_nmi())
58540+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58541+
58542+ uid = current_uid();
58543+
58544+ if (uid == 0)
58545+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
58546+ else {
58547+ /* kill all the processes of this user, hold a reference
58548+ to their creds struct, and prevent them from creating
58549+ another process until system reset
58550+ */
58551+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58552+ /* we intentionally leak this ref */
58553+ user = get_uid(current->cred->user);
58554+ if (user) {
58555+ user->banned = 1;
58556+ user->ban_expires = ~0UL;
58557+ }
58558+
58559+ read_lock(&tasklist_lock);
58560+ do_each_thread(tsk2, tsk) {
58561+ cred = __task_cred(tsk);
58562+ if (cred->uid == uid)
58563+ gr_fake_force_sig(SIGKILL, tsk);
58564+ } while_each_thread(tsk2, tsk);
58565+ read_unlock(&tasklist_lock);
58566+ }
58567+#endif
58568+}
58569+
66a7e928 58570+int __gr_process_user_ban(struct user_struct *user)
71d190be
MT
58571+{
58572+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
66a7e928 58573+ if (unlikely(user->banned)) {
71d190be
MT
58574+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58575+ user->banned = 0;
58576+ user->ban_expires = 0;
58577+ free_uid(user);
58578+ } else
58579+ return -EPERM;
58580+ }
58581+#endif
58582+ return 0;
66a7e928 58583+}
71d190be 58584+
66a7e928
MT
58585+int gr_process_user_ban(void)
58586+{
58587+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58588+ return __gr_process_user_ban(current->cred->user);
58589+#endif
58590+ return 0;
71d190be 58591+}
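The ban bookkeeping above hangs two fields off struct user_struct: banned and ban_expires. gr_handle_brute_attach() sets a 15-minute ban (GR_USER_BAN_TIME) when a non-dumpable, non-root binary keeps crashing, gr_handle_kernel_exploit() sets a permanent one (~0UL) after a suspicious kernel crash, and __gr_process_user_ban() lifts an expired ban the next time the user tries to create a process. A minimal sketch of that expiry check, with a plain struct in place of user_struct, the timestamp supplied by the caller, and the free_uid() reference drop omitted:

#include <stdbool.h>

struct banned_user {
	bool banned;
	unsigned long ban_expires;	/* ~0UL means the ban never expires */
};

/* Returns true if the user may proceed, clearing a ban that has run out. */
static bool process_allowed(struct banned_user *u, unsigned long now)
{
	if (!u->banned)
		return true;
	if (u->ban_expires != ~0UL && now >= u->ban_expires) {
		u->banned = false;	/* ban expired: lift it */
		u->ban_expires = 0;
		return true;
	}
	return false;			/* still banned (-EPERM in the kernel) */
}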
fe2de317
MT
58592diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58593new file mode 100644
58594index 0000000..4030d57
58595--- /dev/null
58596+++ b/grsecurity/grsec_sock.c
66a7e928 58597@@ -0,0 +1,244 @@
58c5fc13
MT
58598+#include <linux/kernel.h>
58599+#include <linux/module.h>
58600+#include <linux/sched.h>
58601+#include <linux/file.h>
58602+#include <linux/net.h>
58603+#include <linux/in.h>
58604+#include <linux/ip.h>
58605+#include <net/sock.h>
58606+#include <net/inet_sock.h>
58607+#include <linux/grsecurity.h>
58608+#include <linux/grinternal.h>
58609+#include <linux/gracl.h>
58610+
58c5fc13
MT
58611+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58612+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58613+
58614+EXPORT_SYMBOL(gr_search_udp_recvmsg);
58615+EXPORT_SYMBOL(gr_search_udp_sendmsg);
58616+
58617+#ifdef CONFIG_UNIX_MODULE
58618+EXPORT_SYMBOL(gr_acl_handle_unix);
58619+EXPORT_SYMBOL(gr_acl_handle_mknod);
58620+EXPORT_SYMBOL(gr_handle_chroot_unix);
58621+EXPORT_SYMBOL(gr_handle_create);
58622+#endif
58623+
58624+#ifdef CONFIG_GRKERNSEC
58625+#define gr_conn_table_size 32749
58626+struct conn_table_entry {
58627+ struct conn_table_entry *next;
58628+ struct signal_struct *sig;
58629+};
58630+
58631+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58632+DEFINE_SPINLOCK(gr_conn_table_lock);
58633+
58634+extern const char * gr_socktype_to_name(unsigned char type);
58635+extern const char * gr_proto_to_name(unsigned char proto);
bc901d79 58636+extern const char * gr_sockfamily_to_name(unsigned char family);
58c5fc13
MT
58637+
58638+static __inline__ int
58639+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58640+{
58641+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58642+}
58643+
58644+static __inline__ int
58645+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58646+ __u16 sport, __u16 dport)
58647+{
58648+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58649+ sig->gr_sport == sport && sig->gr_dport == dport))
58650+ return 1;
58651+ else
58652+ return 0;
58653+}
58654+
58655+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58656+{
58657+ struct conn_table_entry **match;
58658+ unsigned int index;
58659+
58660+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58661+ sig->gr_sport, sig->gr_dport,
58662+ gr_conn_table_size);
58663+
58664+ newent->sig = sig;
58665+
58666+ match = &gr_conn_table[index];
58667+ newent->next = *match;
58668+ *match = newent;
58669+
58670+ return;
58671+}
58672+
58673+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58674+{
58675+ struct conn_table_entry *match, *last = NULL;
58676+ unsigned int index;
58677+
58678+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58679+ sig->gr_sport, sig->gr_dport,
58680+ gr_conn_table_size);
58681+
58682+ match = gr_conn_table[index];
58683+ while (match && !conn_match(match->sig,
58684+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58685+ sig->gr_dport)) {
58686+ last = match;
58687+ match = match->next;
58688+ }
58689+
58690+ if (match) {
58691+ if (last)
58692+ last->next = match->next;
58693+ else
58694+ gr_conn_table[index] = NULL;
58695+ kfree(match);
58696+ }
58697+
58698+ return;
58699+}
58700+
58701+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58702+ __u16 sport, __u16 dport)
58703+{
58704+ struct conn_table_entry *match;
58705+ unsigned int index;
58706+
58707+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58708+
58709+ match = gr_conn_table[index];
58710+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58711+ match = match->next;
58712+
58713+ if (match)
58714+ return match->sig;
58715+ else
58716+ return NULL;
58717+}
58718+
58719+#endif
58720+
58721+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58722+{
58723+#ifdef CONFIG_GRKERNSEC
58724+ struct signal_struct *sig = task->signal;
58725+ struct conn_table_entry *newent;
58726+
58727+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58728+ if (newent == NULL)
58729+ return;
58730+ /* no bh lock needed since we are called with bh disabled */
58731+ spin_lock(&gr_conn_table_lock);
58732+ gr_del_task_from_ip_table_nolock(sig);
ae4e228f
MT
58733+ sig->gr_saddr = inet->inet_rcv_saddr;
58734+ sig->gr_daddr = inet->inet_daddr;
58735+ sig->gr_sport = inet->inet_sport;
58736+ sig->gr_dport = inet->inet_dport;
58c5fc13
MT
58737+ gr_add_to_task_ip_table_nolock(sig, newent);
58738+ spin_unlock(&gr_conn_table_lock);
58739+#endif
58740+ return;
58741+}
58742+
58743+void gr_del_task_from_ip_table(struct task_struct *task)
58744+{
58745+#ifdef CONFIG_GRKERNSEC
58746+ spin_lock_bh(&gr_conn_table_lock);
58747+ gr_del_task_from_ip_table_nolock(task->signal);
58748+ spin_unlock_bh(&gr_conn_table_lock);
58749+#endif
58750+ return;
58751+}
58752+
58753+void
58754+gr_attach_curr_ip(const struct sock *sk)
58755+{
58756+#ifdef CONFIG_GRKERNSEC
58757+ struct signal_struct *p, *set;
58758+ const struct inet_sock *inet = inet_sk(sk);
58759+
58760+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58761+ return;
58762+
58763+ set = current->signal;
58764+
58765+ spin_lock_bh(&gr_conn_table_lock);
ae4e228f
MT
58766+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58767+ inet->inet_dport, inet->inet_sport);
58c5fc13
MT
58768+ if (unlikely(p != NULL)) {
58769+ set->curr_ip = p->curr_ip;
58770+ set->used_accept = 1;
58771+ gr_del_task_from_ip_table_nolock(p);
58772+ spin_unlock_bh(&gr_conn_table_lock);
58773+ return;
58774+ }
58775+ spin_unlock_bh(&gr_conn_table_lock);
58776+
ae4e228f 58777+ set->curr_ip = inet->inet_daddr;
58c5fc13
MT
58778+ set->used_accept = 1;
58779+#endif
58780+ return;
58781+}
58782+
58783+int
58784+gr_handle_sock_all(const int family, const int type, const int protocol)
58785+{
58786+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58787+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
bc901d79
MT
58788+ (family != AF_UNIX)) {
58789+ if (family == AF_INET)
58790+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58791+ else
58792+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58c5fc13
MT
58793+ return -EACCES;
58794+ }
58795+#endif
58796+ return 0;
58797+}
58798+
58799+int
58800+gr_handle_sock_server(const struct sockaddr *sck)
58801+{
58802+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58803+ if (grsec_enable_socket_server &&
58804+ in_group_p(grsec_socket_server_gid) &&
58805+ sck && (sck->sa_family != AF_UNIX) &&
58806+ (sck->sa_family != AF_LOCAL)) {
58807+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58808+ return -EACCES;
58809+ }
58810+#endif
58811+ return 0;
58812+}
58813+
58814+int
58815+gr_handle_sock_server_other(const struct sock *sck)
58816+{
58817+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58818+ if (grsec_enable_socket_server &&
58819+ in_group_p(grsec_socket_server_gid) &&
58820+ sck && (sck->sk_family != AF_UNIX) &&
58821+ (sck->sk_family != AF_LOCAL)) {
58822+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58823+ return -EACCES;
58824+ }
58825+#endif
58826+ return 0;
58827+}
58828+
58829+int
58830+gr_handle_sock_client(const struct sockaddr *sck)
58831+{
58832+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58833+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58834+ sck && (sck->sa_family != AF_UNIX) &&
58835+ (sck->sa_family != AF_LOCAL)) {
58836+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58837+ return -EACCES;
58838+ }
58839+#endif
58840+ return 0;
58841+}
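The connection table at the top of this file is what lets gr_attach_curr_ip() recover the peer address of an accepted TCP connection and stash it in the accepting task's signal_struct; that address feeds the "From %pI4:" prefix in grsec_log.c. A compact sketch of the same chained open-hashing scheme, with a plain 4-tuple node instead of signal_struct and the names conn/conn_lookup invented for illustration:

#include <stdint.h>

#define TABLE_SIZE 32749	/* prime bucket count, as in gr_conn_table_size */

struct conn {
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	struct conn *next;	/* chaining on hash collisions */
};

static struct conn *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	/* same mixing as the kernel helper: ports shifted into higher bits */
	return (saddr + daddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static struct conn *conn_lookup(uint32_t saddr, uint32_t daddr,
				uint16_t sport, uint16_t dport)
{
	struct conn *c = table[conn_hash(saddr, daddr, sport, dport)];

	while (c && !(c->saddr == saddr && c->daddr == daddr &&
		      c->sport == sport && c->dport == dport))
		c = c->next;
	return c;	/* NULL when the tuple was never inserted */
}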
fe2de317
MT
58842diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58843new file mode 100644
58844index 0000000..174668f
58845--- /dev/null
58846+++ b/grsecurity/grsec_sysctl.c
6892158b 58847@@ -0,0 +1,433 @@
58c5fc13
MT
58848+#include <linux/kernel.h>
58849+#include <linux/sched.h>
58850+#include <linux/sysctl.h>
58851+#include <linux/grsecurity.h>
58852+#include <linux/grinternal.h>
58853+
58854+int
58855+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58856+{
58857+#ifdef CONFIG_GRKERNSEC_SYSCTL
58858+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58859+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58860+ return -EACCES;
58861+ }
58862+#endif
58863+ return 0;
58864+}
58865+
ae4e228f
MT
58866+#ifdef CONFIG_GRKERNSEC_ROFS
58867+static int __maybe_unused one = 1;
58868+#endif
58869+
58870+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
57199397 58871+struct ctl_table grsecurity_table[] = {
58c5fc13 58872+#ifdef CONFIG_GRKERNSEC_SYSCTL
df50ba0c
MT
58873+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58874+#ifdef CONFIG_GRKERNSEC_IO
58875+ {
58876+ .procname = "disable_priv_io",
58877+ .data = &grsec_disable_privio,
58878+ .maxlen = sizeof(int),
58879+ .mode = 0600,
58880+ .proc_handler = &proc_dointvec,
58881+ },
58882+#endif
58883+#endif
58c5fc13
MT
58884+#ifdef CONFIG_GRKERNSEC_LINK
58885+ {
58c5fc13
MT
58886+ .procname = "linking_restrictions",
58887+ .data = &grsec_enable_link,
58888+ .maxlen = sizeof(int),
58889+ .mode = 0600,
58890+ .proc_handler = &proc_dointvec,
58891+ },
58892+#endif
15a11c5b 58893+#ifdef CONFIG_GRKERNSEC_BRUTE
58c5fc13 58894+ {
15a11c5b
MT
58895+ .procname = "deter_bruteforce",
58896+ .data = &grsec_enable_brute,
58c5fc13
MT
58897+ .maxlen = sizeof(int),
58898+ .mode = 0600,
58899+ .proc_handler = &proc_dointvec,
58900+ },
58901+#endif
15a11c5b 58902+#ifdef CONFIG_GRKERNSEC_FIFO
58c5fc13 58903+ {
15a11c5b
MT
58904+ .procname = "fifo_restrictions",
58905+ .data = &grsec_enable_fifo,
58c5fc13
MT
58906+ .maxlen = sizeof(int),
58907+ .mode = 0600,
58908+ .proc_handler = &proc_dointvec,
58909+ },
58910+#endif
ae4e228f
MT
58911+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58912+ {
58913+ .procname = "ip_blackhole",
58914+ .data = &grsec_enable_blackhole,
58915+ .maxlen = sizeof(int),
58916+ .mode = 0600,
58917+ .proc_handler = &proc_dointvec,
58918+ },
58919+ {
58920+ .procname = "lastack_retries",
58921+ .data = &grsec_lastack_retries,
58922+ .maxlen = sizeof(int),
58923+ .mode = 0600,
58924+ .proc_handler = &proc_dointvec,
58925+ },
58926+#endif
58c5fc13
MT
58927+#ifdef CONFIG_GRKERNSEC_EXECLOG
58928+ {
58c5fc13
MT
58929+ .procname = "exec_logging",
58930+ .data = &grsec_enable_execlog,
58931+ .maxlen = sizeof(int),
58932+ .mode = 0600,
58933+ .proc_handler = &proc_dointvec,
58934+ },
58935+#endif
6892158b
MT
58936+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58937+ {
58938+ .procname = "rwxmap_logging",
58939+ .data = &grsec_enable_log_rwxmaps,
58940+ .maxlen = sizeof(int),
58941+ .mode = 0600,
58942+ .proc_handler = &proc_dointvec,
58943+ },
58944+#endif
58c5fc13
MT
58945+#ifdef CONFIG_GRKERNSEC_SIGNAL
58946+ {
58c5fc13
MT
58947+ .procname = "signal_logging",
58948+ .data = &grsec_enable_signal,
58949+ .maxlen = sizeof(int),
58950+ .mode = 0600,
58951+ .proc_handler = &proc_dointvec,
58952+ },
58953+#endif
58954+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58955+ {
58c5fc13
MT
58956+ .procname = "forkfail_logging",
58957+ .data = &grsec_enable_forkfail,
58958+ .maxlen = sizeof(int),
58959+ .mode = 0600,
58960+ .proc_handler = &proc_dointvec,
58961+ },
58962+#endif
58963+#ifdef CONFIG_GRKERNSEC_TIME
58964+ {
58c5fc13
MT
58965+ .procname = "timechange_logging",
58966+ .data = &grsec_enable_time,
58967+ .maxlen = sizeof(int),
58968+ .mode = 0600,
58969+ .proc_handler = &proc_dointvec,
58970+ },
58971+#endif
58972+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58973+ {
58c5fc13
MT
58974+ .procname = "chroot_deny_shmat",
58975+ .data = &grsec_enable_chroot_shmat,
58976+ .maxlen = sizeof(int),
58977+ .mode = 0600,
58978+ .proc_handler = &proc_dointvec,
58979+ },
58980+#endif
58981+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58982+ {
58c5fc13
MT
58983+ .procname = "chroot_deny_unix",
58984+ .data = &grsec_enable_chroot_unix,
58985+ .maxlen = sizeof(int),
58986+ .mode = 0600,
58987+ .proc_handler = &proc_dointvec,
58988+ },
58989+#endif
58990+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58991+ {
58c5fc13
MT
58992+ .procname = "chroot_deny_mount",
58993+ .data = &grsec_enable_chroot_mount,
58994+ .maxlen = sizeof(int),
58995+ .mode = 0600,
58996+ .proc_handler = &proc_dointvec,
58997+ },
58998+#endif
58999+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59000+ {
58c5fc13
MT
59001+ .procname = "chroot_deny_fchdir",
59002+ .data = &grsec_enable_chroot_fchdir,
59003+ .maxlen = sizeof(int),
59004+ .mode = 0600,
59005+ .proc_handler = &proc_dointvec,
59006+ },
59007+#endif
59008+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59009+ {
58c5fc13
MT
59010+ .procname = "chroot_deny_chroot",
59011+ .data = &grsec_enable_chroot_double,
59012+ .maxlen = sizeof(int),
59013+ .mode = 0600,
59014+ .proc_handler = &proc_dointvec,
59015+ },
59016+#endif
59017+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59018+ {
58c5fc13
MT
59019+ .procname = "chroot_deny_pivot",
59020+ .data = &grsec_enable_chroot_pivot,
59021+ .maxlen = sizeof(int),
59022+ .mode = 0600,
59023+ .proc_handler = &proc_dointvec,
59024+ },
59025+#endif
59026+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59027+ {
58c5fc13
MT
59028+ .procname = "chroot_enforce_chdir",
59029+ .data = &grsec_enable_chroot_chdir,
59030+ .maxlen = sizeof(int),
59031+ .mode = 0600,
59032+ .proc_handler = &proc_dointvec,
59033+ },
59034+#endif
59035+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59036+ {
58c5fc13
MT
59037+ .procname = "chroot_deny_chmod",
59038+ .data = &grsec_enable_chroot_chmod,
59039+ .maxlen = sizeof(int),
59040+ .mode = 0600,
59041+ .proc_handler = &proc_dointvec,
59042+ },
59043+#endif
59044+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59045+ {
58c5fc13
MT
59046+ .procname = "chroot_deny_mknod",
59047+ .data = &grsec_enable_chroot_mknod,
59048+ .maxlen = sizeof(int),
59049+ .mode = 0600,
59050+ .proc_handler = &proc_dointvec,
59051+ },
59052+#endif
59053+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59054+ {
58c5fc13
MT
59055+ .procname = "chroot_restrict_nice",
59056+ .data = &grsec_enable_chroot_nice,
59057+ .maxlen = sizeof(int),
59058+ .mode = 0600,
59059+ .proc_handler = &proc_dointvec,
59060+ },
59061+#endif
59062+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59063+ {
58c5fc13
MT
59064+ .procname = "chroot_execlog",
59065+ .data = &grsec_enable_chroot_execlog,
59066+ .maxlen = sizeof(int),
59067+ .mode = 0600,
59068+ .proc_handler = &proc_dointvec,
59069+ },
59070+#endif
59071+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
fe2de317
MT
59072+ {
59073+ .procname = "chroot_caps",
59074+ .data = &grsec_enable_chroot_caps,
59075+ .maxlen = sizeof(int),
59076+ .mode = 0600,
59077+ .proc_handler = &proc_dointvec,
59078+ },
59079+#endif
59080+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59081+ {
59082+ .procname = "chroot_deny_sysctl",
59083+ .data = &grsec_enable_chroot_sysctl,
59084+ .maxlen = sizeof(int),
59085+ .mode = 0600,
59086+ .proc_handler = &proc_dointvec,
59087+ },
59088+#endif
59089+#ifdef CONFIG_GRKERNSEC_TPE
59090+ {
59091+ .procname = "tpe",
59092+ .data = &grsec_enable_tpe,
59093+ .maxlen = sizeof(int),
59094+ .mode = 0600,
59095+ .proc_handler = &proc_dointvec,
59096+ },
59097+ {
59098+ .procname = "tpe_gid",
59099+ .data = &grsec_tpe_gid,
59100+ .maxlen = sizeof(int),
59101+ .mode = 0600,
59102+ .proc_handler = &proc_dointvec,
59103+ },
59104+#endif
59105+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59106+ {
59107+ .procname = "tpe_invert",
59108+ .data = &grsec_enable_tpe_invert,
59109+ .maxlen = sizeof(int),
59110+ .mode = 0600,
59111+ .proc_handler = &proc_dointvec,
59112+ },
59113+#endif
59114+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59115+ {
59116+ .procname = "tpe_restrict_all",
59117+ .data = &grsec_enable_tpe_all,
59118+ .maxlen = sizeof(int),
59119+ .mode = 0600,
59120+ .proc_handler = &proc_dointvec,
59121+ },
59122+#endif
59123+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59124+ {
59125+ .procname = "socket_all",
59126+ .data = &grsec_enable_socket_all,
59127+ .maxlen = sizeof(int),
59128+ .mode = 0600,
59129+ .proc_handler = &proc_dointvec,
59130+ },
59131+ {
59132+ .procname = "socket_all_gid",
59133+ .data = &grsec_socket_all_gid,
59134+ .maxlen = sizeof(int),
59135+ .mode = 0600,
59136+ .proc_handler = &proc_dointvec,
59137+ },
59138+#endif
59139+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59140+ {
59141+ .procname = "socket_client",
59142+ .data = &grsec_enable_socket_client,
59143+ .maxlen = sizeof(int),
59144+ .mode = 0600,
59145+ .proc_handler = &proc_dointvec,
59146+ },
59147+ {
59148+ .procname = "socket_client_gid",
59149+ .data = &grsec_socket_client_gid,
59150+ .maxlen = sizeof(int),
59151+ .mode = 0600,
59152+ .proc_handler = &proc_dointvec,
59153+ },
59154+#endif
59155+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59156+ {
59157+ .procname = "socket_server",
59158+ .data = &grsec_enable_socket_server,
59159+ .maxlen = sizeof(int),
59160+ .mode = 0600,
59161+ .proc_handler = &proc_dointvec,
59162+ },
59163+ {
59164+ .procname = "socket_server_gid",
59165+ .data = &grsec_socket_server_gid,
59166+ .maxlen = sizeof(int),
59167+ .mode = 0600,
59168+ .proc_handler = &proc_dointvec,
59169+ },
59170+#endif
59171+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59172+ {
59173+ .procname = "audit_group",
59174+ .data = &grsec_enable_group,
59175+ .maxlen = sizeof(int),
59176+ .mode = 0600,
59177+ .proc_handler = &proc_dointvec,
59178+ },
59179+ {
59180+ .procname = "audit_gid",
59181+ .data = &grsec_audit_gid,
59182+ .maxlen = sizeof(int),
59183+ .mode = 0600,
59184+ .proc_handler = &proc_dointvec,
59185+ },
59186+#endif
59187+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59188+ {
59189+ .procname = "audit_chdir",
59190+ .data = &grsec_enable_chdir,
59191+ .maxlen = sizeof(int),
59192+ .mode = 0600,
59193+ .proc_handler = &proc_dointvec,
59194+ },
59195+#endif
59196+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59197+ {
59198+ .procname = "audit_mount",
59199+ .data = &grsec_enable_mount,
59200+ .maxlen = sizeof(int),
59201+ .mode = 0600,
59202+ .proc_handler = &proc_dointvec,
59203+ },
59204+#endif
59205+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59206+ {
59207+ .procname = "audit_textrel",
59208+ .data = &grsec_enable_audit_textrel,
59209+ .maxlen = sizeof(int),
59210+ .mode = 0600,
59211+ .proc_handler = &proc_dointvec,
59212+ },
59213+#endif
59214+#ifdef CONFIG_GRKERNSEC_DMESG
59215+ {
59216+ .procname = "dmesg",
59217+ .data = &grsec_enable_dmesg,
59218+ .maxlen = sizeof(int),
59219+ .mode = 0600,
59220+ .proc_handler = &proc_dointvec,
59221+ },
59222+#endif
59223+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59224+ {
59225+ .procname = "chroot_findtask",
59226+ .data = &grsec_enable_chroot_findtask,
59227+ .maxlen = sizeof(int),
59228+ .mode = 0600,
59229+ .proc_handler = &proc_dointvec,
59230+ },
59231+#endif
59232+#ifdef CONFIG_GRKERNSEC_RESLOG
59233+ {
59234+ .procname = "resource_logging",
59235+ .data = &grsec_resource_logging,
59236+ .maxlen = sizeof(int),
59237+ .mode = 0600,
59238+ .proc_handler = &proc_dointvec,
59239+ },
59240+#endif
59241+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59242+ {
59243+ .procname = "audit_ptrace",
59244+ .data = &grsec_enable_audit_ptrace,
59245+ .maxlen = sizeof(int),
59246+ .mode = 0600,
59247+ .proc_handler = &proc_dointvec,
59248+ },
59249+#endif
59250+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59251+ {
59252+ .procname = "harden_ptrace",
59253+ .data = &grsec_enable_harden_ptrace,
59254+ .maxlen = sizeof(int),
59255+ .mode = 0600,
59256+ .proc_handler = &proc_dointvec,
59257+ },
59258+#endif
59259+ {
59260+ .procname = "grsec_lock",
59261+ .data = &grsec_lock,
59262+ .maxlen = sizeof(int),
59263+ .mode = 0600,
59264+ .proc_handler = &proc_dointvec,
59265+ },
59266+#endif
59267+#ifdef CONFIG_GRKERNSEC_ROFS
59268+ {
59269+ .procname = "romount_protect",
59270+ .data = &grsec_enable_rofs,
59271+ .maxlen = sizeof(int),
59272+ .mode = 0600,
59273+ .proc_handler = &proc_dointvec_minmax,
59274+ .extra1 = &one,
59275+ .extra2 = &one,
59276+ },
59277+#endif
59278+ { }
59279+};
59280+#endif
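Every entry above becomes a mode-0600 integer tunable handled by proc_dointvec. Assuming the table is registered under a kernel/grsecurity directory (the registration itself is outside this hunk), each knob shows up as /proc/sys/kernel/grsecurity/<procname> and can be toggled from userspace until grsec_lock is set, after which gr_handle_sysctl_mod() refuses writes. A small example writer under that path assumption; the helper name set_grsec_sysctl is made up:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Write "1" to a grsecurity sysctl, e.g. set_grsec_sysctl("deter_bruteforce"). */
static int set_grsec_sysctl(const char *name)
{
	char path[128];
	int fd;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, "1\n", 2) != 2) {
		perror("write");
		close(fd);
		return -1;
	}
	return close(fd);
}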
59281diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59282new file mode 100644
59283index 0000000..0dc13c3
59284--- /dev/null
59285+++ b/grsecurity/grsec_time.c
59286@@ -0,0 +1,16 @@
59287+#include <linux/kernel.h>
59288+#include <linux/sched.h>
59289+#include <linux/grinternal.h>
59290+#include <linux/module.h>
58c5fc13 59291+
fe2de317
MT
59292+void
59293+gr_log_timechange(void)
59294+{
59295+#ifdef CONFIG_GRKERNSEC_TIME
59296+ if (grsec_enable_time)
59297+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59298+#endif
59299+ return;
59300+}
58c5fc13 59301+
fe2de317
MT
59302+EXPORT_SYMBOL(gr_log_timechange);
59303diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59304new file mode 100644
59305index 0000000..4a78774
59306--- /dev/null
59307+++ b/grsecurity/grsec_tpe.c
59308@@ -0,0 +1,39 @@
59309+#include <linux/kernel.h>
59310+#include <linux/sched.h>
59311+#include <linux/file.h>
59312+#include <linux/fs.h>
59313+#include <linux/grinternal.h>
58c5fc13 59314+
fe2de317 59315+extern int gr_acl_tpe_check(void);
58c5fc13 59316+
fe2de317
MT
59317+int
59318+gr_tpe_allow(const struct file *file)
59319+{
59320+#ifdef CONFIG_GRKERNSEC
59321+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59322+ const struct cred *cred = current_cred();
58c5fc13 59323+
fe2de317
MT
59324+ if (cred->uid && ((grsec_enable_tpe &&
59325+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59326+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
59327+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
59328+#else
59329+ in_group_p(grsec_tpe_gid)
59330+#endif
59331+ ) || gr_acl_tpe_check()) &&
59332+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
59333+ (inode->i_mode & S_IWOTH))))) {
59334+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59335+ return 0;
59336+ }
59337+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59338+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
59339+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
59340+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
59341+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59342+ return 0;
59343+ }
59344+#endif
59345+#endif
59346+ return 1;
59347+}
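gr_tpe_allow() above keys its decision off the directory that contains the binary (the d_parent inode), not the binary itself: a non-root user subject to TPE may only execute out of directories that are owned by root and not group- or world-writable, and with GRKERNSEC_TPE_ALL the directory must additionally be owned by root or the user and again not writable by others. A sketch of that decision over a stat of the containing directory; in_tpe_group folds together the group, invert and RBAC checks, and the function name is invented:

#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>

static bool tpe_exec_allowed(const struct stat *dir, uid_t uid,
			     bool in_tpe_group, bool tpe_all)
{
	if (uid == 0)
		return true;		/* root is never restricted */

	if (in_tpe_group &&
	    (dir->st_uid != 0 || (dir->st_mode & (S_IWGRP | S_IWOTH))))
		return false;		/* dir must be root-owned and not writable by others */

	if (tpe_all &&
	    ((dir->st_uid != 0 && dir->st_uid != uid) ||
	     (dir->st_mode & (S_IWGRP | S_IWOTH))))
		return false;		/* TPE_ALL: dir owned by root or the caller, not writable */

	return true;
}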
59348diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59349new file mode 100644
59350index 0000000..9f7b1ac
59351--- /dev/null
59352+++ b/grsecurity/grsum.c
59353@@ -0,0 +1,61 @@
59354+#include <linux/err.h>
59355+#include <linux/kernel.h>
59356+#include <linux/sched.h>
59357+#include <linux/mm.h>
59358+#include <linux/scatterlist.h>
59359+#include <linux/crypto.h>
59360+#include <linux/gracl.h>
58c5fc13 59361+
58c5fc13 59362+
fe2de317
MT
59363+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59364+#error "crypto and sha256 must be built into the kernel"
59365+#endif
58c5fc13 59366+
fe2de317
MT
59367+int
59368+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59369+{
59370+ char *p;
59371+ struct crypto_hash *tfm;
59372+ struct hash_desc desc;
59373+ struct scatterlist sg;
59374+ unsigned char temp_sum[GR_SHA_LEN];
59375+ volatile int retval = 0;
59376+ volatile int dummy = 0;
59377+ unsigned int i;
57199397 59378+
fe2de317 59379+ sg_init_table(&sg, 1);
57199397 59380+
fe2de317
MT
59381+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59382+ if (IS_ERR(tfm)) {
59383+ /* should never happen, since sha256 should be built in */
59384+ return 1;
59385+ }
57199397 59386+
fe2de317
MT
59387+ desc.tfm = tfm;
59388+ desc.flags = 0;
57199397 59389+
fe2de317 59390+ crypto_hash_init(&desc);
57199397 59391+
fe2de317
MT
59392+ p = salt;
59393+ sg_set_buf(&sg, p, GR_SALT_LEN);
59394+ crypto_hash_update(&desc, &sg, sg.length);
57199397 59395+
fe2de317
MT
59396+ p = entry->pw;
59397+ sg_set_buf(&sg, p, strlen(p));
59398+
59399+ crypto_hash_update(&desc, &sg, sg.length);
57199397 59400+
fe2de317 59401+ crypto_hash_final(&desc, temp_sum);
57199397 59402+
fe2de317 59403+ memset(entry->pw, 0, GR_PW_LEN);
57199397 59404+
fe2de317
MT
59405+ for (i = 0; i < GR_SHA_LEN; i++)
59406+ if (sum[i] != temp_sum[i])
59407+ retval = 1;
59408+ else
59409+ dummy = 1; // waste a cycle
15a11c5b 59410+
fe2de317 59411+ crypto_free_hash(tfm);
57199397 59412+
fe2de317
MT
59413+ return retval;
59414+}
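The final loop in chkpw() compares every byte of the digest and deliberately burns a cycle on matching bytes instead of bailing out early, so the time taken does not reveal how many leading bytes of the stored hash were correct. The same idea is usually written branch-free; a hedged standalone equivalent:

#include <stddef.h>

/* Returns 0 iff the two buffers are equal; the running time does not
 * depend on where (or whether) they first differ. */
static int ct_memcmp(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];	/* accumulate differences without branching */

	return diff != 0;
}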
59415diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59416index 6cd5b64..f620d2d 100644
59417--- a/include/acpi/acpi_bus.h
59418+++ b/include/acpi/acpi_bus.h
15a11c5b
MT
59419@@ -107,7 +107,7 @@ struct acpi_device_ops {
59420 acpi_op_bind bind;
59421 acpi_op_unbind unbind;
59422 acpi_op_notify notify;
59423-};
59424+} __no_const;
59425
59426 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59427
fe2de317
MT
59428diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59429index b7babf0..71e4e74 100644
59430--- a/include/asm-generic/atomic-long.h
59431+++ b/include/asm-generic/atomic-long.h
ae4e228f
MT
59432@@ -22,6 +22,12 @@
59433
59434 typedef atomic64_t atomic_long_t;
59435
59436+#ifdef CONFIG_PAX_REFCOUNT
59437+typedef atomic64_unchecked_t atomic_long_unchecked_t;
59438+#else
59439+typedef atomic64_t atomic_long_unchecked_t;
59440+#endif
58c5fc13 59441+
ae4e228f 59442 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58c5fc13 59443
ae4e228f 59444 static inline long atomic_long_read(atomic_long_t *l)
fe2de317 59445@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
ae4e228f
MT
59446 return (long)atomic64_read(v);
59447 }
59448
59449+#ifdef CONFIG_PAX_REFCOUNT
59450+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59451+{
59452+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58c5fc13 59453+
ae4e228f
MT
59454+ return (long)atomic64_read_unchecked(v);
59455+}
59456+#endif
59457+
59458 static inline void atomic_long_set(atomic_long_t *l, long i)
59459 {
59460 atomic64_t *v = (atomic64_t *)l;
fe2de317 59461@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
ae4e228f
MT
59462 atomic64_set(v, i);
59463 }
58c5fc13 59464
ae4e228f
MT
59465+#ifdef CONFIG_PAX_REFCOUNT
59466+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59467+{
59468+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59469+
59470+ atomic64_set_unchecked(v, i);
59471+}
59472+#endif
59473+
59474 static inline void atomic_long_inc(atomic_long_t *l)
59475 {
59476 atomic64_t *v = (atomic64_t *)l;
fe2de317 59477@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
ae4e228f 59478 atomic64_inc(v);
58c5fc13
MT
59479 }
59480
ae4e228f
MT
59481+#ifdef CONFIG_PAX_REFCOUNT
59482+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58c5fc13 59483+{
ae4e228f
MT
59484+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59485+
59486+ atomic64_inc_unchecked(v);
58c5fc13 59487+}
ae4e228f 59488+#endif
58c5fc13 59489+
ae4e228f 59490 static inline void atomic_long_dec(atomic_long_t *l)
58c5fc13 59491 {
ae4e228f 59492 atomic64_t *v = (atomic64_t *)l;
fe2de317 59493@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
df50ba0c
MT
59494 atomic64_dec(v);
59495 }
59496
59497+#ifdef CONFIG_PAX_REFCOUNT
59498+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59499+{
59500+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59501+
59502+ atomic64_dec_unchecked(v);
59503+}
59504+#endif
59505+
59506 static inline void atomic_long_add(long i, atomic_long_t *l)
59507 {
59508 atomic64_t *v = (atomic64_t *)l;
fe2de317 59509@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
ae4e228f 59510 atomic64_add(i, v);
58c5fc13
MT
59511 }
59512
ae4e228f
MT
59513+#ifdef CONFIG_PAX_REFCOUNT
59514+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58c5fc13 59515+{
ae4e228f
MT
59516+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59517+
59518+ atomic64_add_unchecked(i, v);
58c5fc13 59519+}
ae4e228f 59520+#endif
58c5fc13 59521+
ae4e228f 59522 static inline void atomic_long_sub(long i, atomic_long_t *l)
58c5fc13 59523 {
ae4e228f 59524 atomic64_t *v = (atomic64_t *)l;
fe2de317 59525@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
6892158b
MT
59526 atomic64_sub(i, v);
59527 }
59528
59529+#ifdef CONFIG_PAX_REFCOUNT
59530+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59531+{
59532+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59533+
59534+ atomic64_sub_unchecked(i, v);
59535+}
59536+#endif
59537+
59538 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59539 {
59540 atomic64_t *v = (atomic64_t *)l;
fe2de317 59541@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
ae4e228f 59542 return (long)atomic64_inc_return(v);
58c5fc13
MT
59543 }
59544
ae4e228f
MT
59545+#ifdef CONFIG_PAX_REFCOUNT
59546+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58c5fc13 59547+{
ae4e228f
MT
59548+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59549+
59550+ return (long)atomic64_inc_return_unchecked(v);
58c5fc13 59551+}
ae4e228f 59552+#endif
58c5fc13 59553+
ae4e228f
MT
59554 static inline long atomic_long_dec_return(atomic_long_t *l)
59555 {
59556 atomic64_t *v = (atomic64_t *)l;
fe2de317 59557@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
ae4e228f
MT
59558
59559 typedef atomic_t atomic_long_t;
59560
59561+#ifdef CONFIG_PAX_REFCOUNT
59562+typedef atomic_unchecked_t atomic_long_unchecked_t;
59563+#else
59564+typedef atomic_t atomic_long_unchecked_t;
59565+#endif
59566+
59567 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59568 static inline long atomic_long_read(atomic_long_t *l)
59569 {
fe2de317 59570@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
ae4e228f
MT
59571 return (long)atomic_read(v);
59572 }
59573
59574+#ifdef CONFIG_PAX_REFCOUNT
59575+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59576+{
59577+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59578+
59579+ return (long)atomic_read_unchecked(v);
59580+}
59581+#endif
59582+
59583 static inline void atomic_long_set(atomic_long_t *l, long i)
59584 {
59585 atomic_t *v = (atomic_t *)l;
fe2de317 59586@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
ae4e228f
MT
59587 atomic_set(v, i);
59588 }
59589
59590+#ifdef CONFIG_PAX_REFCOUNT
59591+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59592+{
59593+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59594+
59595+ atomic_set_unchecked(v, i);
59596+}
59597+#endif
59598+
59599 static inline void atomic_long_inc(atomic_long_t *l)
59600 {
59601 atomic_t *v = (atomic_t *)l;
fe2de317 59602@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
ae4e228f
MT
59603 atomic_inc(v);
59604 }
59605
59606+#ifdef CONFIG_PAX_REFCOUNT
59607+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59608+{
59609+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59610+
59611+ atomic_inc_unchecked(v);
59612+}
59613+#endif
59614+
59615 static inline void atomic_long_dec(atomic_long_t *l)
59616 {
59617 atomic_t *v = (atomic_t *)l;
fe2de317 59618@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
df50ba0c
MT
59619 atomic_dec(v);
59620 }
59621
59622+#ifdef CONFIG_PAX_REFCOUNT
59623+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59624+{
59625+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59626+
59627+ atomic_dec_unchecked(v);
59628+}
59629+#endif
59630+
59631 static inline void atomic_long_add(long i, atomic_long_t *l)
59632 {
59633 atomic_t *v = (atomic_t *)l;
fe2de317 59634@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
ae4e228f
MT
59635 atomic_add(i, v);
59636 }
59637
59638+#ifdef CONFIG_PAX_REFCOUNT
59639+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59640+{
59641+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59642+
59643+ atomic_add_unchecked(i, v);
59644+}
59645+#endif
59646+
59647 static inline void atomic_long_sub(long i, atomic_long_t *l)
58c5fc13 59648 {
ae4e228f 59649 atomic_t *v = (atomic_t *)l;
fe2de317 59650@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
6892158b
MT
59651 atomic_sub(i, v);
59652 }
59653
59654+#ifdef CONFIG_PAX_REFCOUNT
59655+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59656+{
59657+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59658+
59659+ atomic_sub_unchecked(i, v);
59660+}
59661+#endif
59662+
59663 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59664 {
59665 atomic_t *v = (atomic_t *)l;
fe2de317 59666@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
ae4e228f
MT
59667 return (long)atomic_inc_return(v);
59668 }
59669
59670+#ifdef CONFIG_PAX_REFCOUNT
59671+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59672+{
59673+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59674+
59675+ return (long)atomic_inc_return_unchecked(v);
59676+}
59677+#endif
59678+
59679 static inline long atomic_long_dec_return(atomic_long_t *l)
59680 {
59681 atomic_t *v = (atomic_t *)l;
fe2de317 59682@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
ae4e228f
MT
59683
59684 #endif /* BITS_PER_LONG == 64 */
59685
59686+#ifdef CONFIG_PAX_REFCOUNT
59687+static inline void pax_refcount_needs_these_functions(void)
59688+{
59689+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
59690+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59691+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59692+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59693+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
15a11c5b 59694+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57199397 59695+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
6892158b 59696+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
8308f9c9
MT
59697+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59698+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
15a11c5b 59699+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
ae4e228f
MT
59700+
59701+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59702+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59703+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
6892158b 59704+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
ae4e228f
MT
59705+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59706+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
df50ba0c 59707+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
ae4e228f
MT
59708+}
59709+#else
59710+#define atomic_read_unchecked(v) atomic_read(v)
59711+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59712+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59713+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59714+#define atomic_inc_unchecked(v) atomic_inc(v)
66a7e928 59715+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57199397 59716+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
6892158b 59717+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
8308f9c9
MT
59718+#define atomic_dec_unchecked(v) atomic_dec(v)
59719+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59720+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
ae4e228f
MT
59721+
59722+#define atomic_long_read_unchecked(v) atomic_long_read(v)
59723+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59724+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
6892158b 59725+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
ae4e228f
MT
59726+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59727+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
df50ba0c 59728+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
ae4e228f
MT
59729+#endif
59730+
59731 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
fe2de317
MT
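The atomic-long hunks above follow one pattern: under CONFIG_PAX_REFCOUNT every checked operation gains an *_unchecked twin, and without the option the *_unchecked names simply alias the stock operations. The intent is that plain atomic_t/atomic_long_t counters become overflow-detecting reference counts, while counters that may legitimately wrap (statistics, debug ids) are switched to the unchecked types. A minimal sketch of that split, assuming the atomic_unchecked_t typedef supplied by the arch headers elsewhere in this patch; the struct and function names are illustrative only:

	#include <linux/atomic.h>

	struct example_stats {
		atomic_t refcount;		/* overflow-checked: a real reference count */
		atomic_unchecked_t tx_packets;	/* allowed to wrap: plain statistics */
	};

	static void example_xmit(struct example_stats *stats)
	{
		atomic_inc_unchecked(&stats->tx_packets);	/* no REFCOUNT trap on wraparound */
	}

	static void example_get(struct example_stats *stats)
	{
		atomic_inc(&stats->refcount);			/* wraparound here would be caught */
	}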
59732diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59733index b18ce4f..2ee2843 100644
59734--- a/include/asm-generic/atomic64.h
59735+++ b/include/asm-generic/atomic64.h
59736@@ -16,6 +16,8 @@ typedef struct {
59737 long long counter;
59738 } atomic64_t;
59739
59740+typedef atomic64_t atomic64_unchecked_t;
59741+
59742 #define ATOMIC64_INIT(i) { (i) }
59743
59744 extern long long atomic64_read(const atomic64_t *v);
59745@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59746 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59747 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59748
59749+#define atomic64_read_unchecked(v) atomic64_read(v)
59750+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59751+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59752+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59753+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59754+#define atomic64_inc_unchecked(v) atomic64_inc(v)
59755+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59756+#define atomic64_dec_unchecked(v) atomic64_dec(v)
59757+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59758+
59759 #endif /* _ASM_GENERIC_ATOMIC64_H */
59760diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59761index 1bfcfe5..e04c5c9 100644
59762--- a/include/asm-generic/cache.h
59763+++ b/include/asm-generic/cache.h
8308f9c9
MT
59764@@ -6,7 +6,7 @@
59765 * cache lines need to provide their own cache.h.
59766 */
59767
59768-#define L1_CACHE_SHIFT 5
59769-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15a11c5b
MT
59770+#define L1_CACHE_SHIFT 5UL
59771+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
8308f9c9
MT
59772
59773 #endif /* __ASM_GENERIC_CACHE_H */
fe2de317
MT
59774diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
59775index 1ca3efc..e3dc852 100644
59776--- a/include/asm-generic/int-l64.h
59777+++ b/include/asm-generic/int-l64.h
58c5fc13
MT
59778@@ -46,6 +46,8 @@ typedef unsigned int u32;
59779 typedef signed long s64;
59780 typedef unsigned long u64;
59781
59782+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
59783+
59784 #define S8_C(x) x
59785 #define U8_C(x) x ## U
59786 #define S16_C(x) x
fe2de317
MT
59787diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
59788index f394147..b6152b9 100644
59789--- a/include/asm-generic/int-ll64.h
59790+++ b/include/asm-generic/int-ll64.h
58c5fc13
MT
59791@@ -51,6 +51,8 @@ typedef unsigned int u32;
59792 typedef signed long long s64;
59793 typedef unsigned long long u64;
59794
59795+typedef unsigned long long intoverflow_t;
59796+
59797 #define S8_C(x) x
59798 #define U8_C(x) x ## U
59799 #define S16_C(x) x
fe2de317
MT
59800diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59801index 0232ccb..13d9165 100644
59802--- a/include/asm-generic/kmap_types.h
59803+++ b/include/asm-generic/kmap_types.h
57199397 59804@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
ae4e228f
MT
59805 KMAP_D(17) KM_NMI,
59806 KMAP_D(18) KM_NMI_PTE,
57199397
MT
59807 KMAP_D(19) KM_KDB,
59808+KMAP_D(20) KM_CLEARPAGE,
59809 /*
59810 * Remember to update debug_kmap_atomic() when adding new kmap types!
59811 */
59812-KMAP_D(20) KM_TYPE_NR
59813+KMAP_D(21) KM_TYPE_NR
58c5fc13
MT
59814 };
59815
ae4e228f 59816 #undef KMAP_D
fe2de317
MT
59817diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59818index 725612b..9cc513a 100644
59819--- a/include/asm-generic/pgtable-nopmd.h
59820+++ b/include/asm-generic/pgtable-nopmd.h
57199397
MT
59821@@ -1,14 +1,19 @@
59822 #ifndef _PGTABLE_NOPMD_H
59823 #define _PGTABLE_NOPMD_H
59824
59825-#ifndef __ASSEMBLY__
59826-
59827 #include <asm-generic/pgtable-nopud.h>
59828
59829-struct mm_struct;
59830-
59831 #define __PAGETABLE_PMD_FOLDED
59832
59833+#define PMD_SHIFT PUD_SHIFT
59834+#define PTRS_PER_PMD 1
59835+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59836+#define PMD_MASK (~(PMD_SIZE-1))
59837+
59838+#ifndef __ASSEMBLY__
59839+
59840+struct mm_struct;
59841+
59842 /*
59843 * Having the pmd type consist of a pud gets the size right, and allows
59844 * us to conceptually access the pud entry that this pmd is folded into
59845@@ -16,11 +21,6 @@ struct mm_struct;
59846 */
59847 typedef struct { pud_t pud; } pmd_t;
59848
59849-#define PMD_SHIFT PUD_SHIFT
59850-#define PTRS_PER_PMD 1
59851-#define PMD_SIZE (1UL << PMD_SHIFT)
59852-#define PMD_MASK (~(PMD_SIZE-1))
59853-
59854 /*
59855 * The "pud_xxx()" functions here are trivial for a folded two-level
59856 * setup: the pmd is never bad, and a pmd always exists (as it's folded
fe2de317
MT
59857diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59858index 810431d..ccc3638 100644
59859--- a/include/asm-generic/pgtable-nopud.h
59860+++ b/include/asm-generic/pgtable-nopud.h
57199397
MT
59861@@ -1,10 +1,15 @@
59862 #ifndef _PGTABLE_NOPUD_H
59863 #define _PGTABLE_NOPUD_H
59864
59865-#ifndef __ASSEMBLY__
59866-
59867 #define __PAGETABLE_PUD_FOLDED
59868
59869+#define PUD_SHIFT PGDIR_SHIFT
59870+#define PTRS_PER_PUD 1
59871+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59872+#define PUD_MASK (~(PUD_SIZE-1))
59873+
59874+#ifndef __ASSEMBLY__
59875+
59876 /*
59877 * Having the pud type consist of a pgd gets the size right, and allows
59878 * us to conceptually access the pgd entry that this pud is folded into
59879@@ -12,11 +17,6 @@
59880 */
59881 typedef struct { pgd_t pgd; } pud_t;
59882
59883-#define PUD_SHIFT PGDIR_SHIFT
59884-#define PTRS_PER_PUD 1
59885-#define PUD_SIZE (1UL << PUD_SHIFT)
59886-#define PUD_MASK (~(PUD_SIZE-1))
59887-
59888 /*
59889 * The "pgd_xxx()" functions here are trivial for a folded two-level
59890 * setup: the pud is never bad, and a pud always exists (as it's folded
fe2de317
MT
59891diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59892index 76bff2b..c7a14e2 100644
59893--- a/include/asm-generic/pgtable.h
59894+++ b/include/asm-generic/pgtable.h
59895@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
59896 #endif /* __HAVE_ARCH_PMD_WRITE */
59897 #endif
59898
59899+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59900+static inline unsigned long pax_open_kernel(void) { return 0; }
59901+#endif
59902+
59903+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59904+static inline unsigned long pax_close_kernel(void) { return 0; }
59905+#endif
59906+
59907 #endif /* !__ASSEMBLY__ */
59908
59909 #endif /* _ASM_GENERIC_PGTABLE_H */
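The pgtable.h hunk above adds no-op defaults for pax_open_kernel()/pax_close_kernel(); architectures that implement kernel write protection override them to lift it for the duration of a single update. A hedged sketch of the calling pattern used elsewhere in the patch; the table and function are illustrative, __read_only falls back to __read_mostly via the cache.h hunk further below, and the vmlinux.lds.h hunk that follows places such data in .data..read_only:

	static int example_table[4] __read_only;

	static void example_set(int idx, int val)
	{
		pax_open_kernel();		/* arch hook: temporarily allow the write */
		example_table[idx] = val;
		pax_close_kernel();		/* restore the protection */
	}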
59910diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59911index db22d13..1f2e3e1 100644
59912--- a/include/asm-generic/vmlinux.lds.h
59913+++ b/include/asm-generic/vmlinux.lds.h
15a11c5b 59914@@ -217,6 +217,7 @@
58c5fc13
MT
59915 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59916 VMLINUX_SYMBOL(__start_rodata) = .; \
59917 *(.rodata) *(.rodata.*) \
57199397 59918+ *(.data..read_only) \
58c5fc13 59919 *(__vermagic) /* Kernel version magic */ \
16454cff
MT
59920 . = ALIGN(8); \
59921 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
15a11c5b 59922@@ -723,17 +724,18 @@
58c5fc13
MT
59923 * section in the linker script will go there too. @phdr should have
59924 * a leading colon.
59925 *
59926- * Note that this macros defines __per_cpu_load as an absolute symbol.
59927+ * Note that this macros defines per_cpu_load as an absolute symbol.
59928 * If there is no need to put the percpu section at a predetermined
15a11c5b 59929 * address, use PERCPU_SECTION.
58c5fc13 59930 */
66a7e928 59931 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
58c5fc13 59932- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57199397 59933- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
58c5fc13 59934+ per_cpu_load = .; \
57199397 59935+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
58c5fc13
MT
59936 - LOAD_OFFSET) { \
59937+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
15a11c5b 59938 PERCPU_INPUT(cacheline) \
58c5fc13 59939 } phdr \
57199397
MT
59940- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59941+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
58c5fc13
MT
59942
59943 /**
15a11c5b 59944 * PERCPU_SECTION - define output section for percpu area, simple version
fe2de317
MT
59945diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59946index 9b7c2bb..76b7d1e 100644
59947--- a/include/drm/drmP.h
59948+++ b/include/drm/drmP.h
c52201e0
MT
59949@@ -73,6 +73,7 @@
59950 #include <linux/workqueue.h>
59951 #include <linux/poll.h>
59952 #include <asm/pgalloc.h>
59953+#include <asm/local.h>
59954 #include "drm.h"
59955
59956 #include <linux/idr.h>
6e9df6a3 59957@@ -1035,7 +1036,7 @@ struct drm_device {
57199397
MT
59958
59959 /** \name Usage Counters */
59960 /*@{ */
59961- int open_count; /**< Outstanding files open */
c52201e0 59962+ local_t open_count; /**< Outstanding files open */
57199397
MT
59963 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59964 atomic_t vma_count; /**< Outstanding vma areas open */
59965 int buf_use; /**< Buffers in use -- cannot alloc */
6e9df6a3 59966@@ -1046,7 +1047,7 @@ struct drm_device {
57199397
MT
59967 /*@{ */
59968 unsigned long counters;
59969 enum drm_stat_type types[15];
59970- atomic_t counts[15];
59971+ atomic_unchecked_t counts[15];
59972 /*@} */
59973
59974 struct list_head filelist;
fe2de317
MT
59975diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59976index 73b0712..0b7ef2f 100644
59977--- a/include/drm/drm_crtc_helper.h
59978+++ b/include/drm/drm_crtc_helper.h
59979@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59980
59981 /* disable crtc when not in use - more explicit than dpms off */
59982 void (*disable)(struct drm_crtc *crtc);
59983-};
59984+} __no_const;
59985
59986 struct drm_encoder_helper_funcs {
59987 void (*dpms)(struct drm_encoder *encoder, int mode);
59988@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59989 struct drm_connector *connector);
59990 /* disable encoder when not in use - more explicit than dpms off */
59991 void (*disable)(struct drm_encoder *encoder);
59992-};
59993+} __no_const;
59994
59995 struct drm_connector_helper_funcs {
59996 int (*get_modes)(struct drm_connector *connector);
59997diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59998index 26c1f78..6722682 100644
59999--- a/include/drm/ttm/ttm_memory.h
60000+++ b/include/drm/ttm/ttm_memory.h
15a11c5b
MT
60001@@ -47,7 +47,7 @@
60002
60003 struct ttm_mem_shrink {
60004 int (*do_shrink) (struct ttm_mem_shrink *);
60005-};
60006+} __no_const;
60007
60008 /**
60009 * struct ttm_mem_global - Global memory accounting structure.
fe2de317
MT
60010diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60011index e86dfca..40cc55f 100644
60012--- a/include/linux/a.out.h
60013+++ b/include/linux/a.out.h
58c5fc13
MT
60014@@ -39,6 +39,14 @@ enum machine_type {
60015 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60016 };
60017
60018+/* Constants for the N_FLAGS field */
60019+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60020+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60021+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60022+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60023+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60024+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60025+
60026 #if !defined (N_MAGIC)
60027 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60028 #endif
fe2de317
MT
60029diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60030index 49a83ca..df96b54 100644
60031--- a/include/linux/atmdev.h
60032+++ b/include/linux/atmdev.h
58c5fc13
MT
60033@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60034 #endif
60035
60036 struct k_atm_aal_stats {
60037-#define __HANDLE_ITEM(i) atomic_t i
60038+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60039 __AAL_STAT_ITEMS
60040 #undef __HANDLE_ITEM
60041 };
fe2de317
MT
60042diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60043index fd88a39..f4d0bad 100644
60044--- a/include/linux/binfmts.h
60045+++ b/include/linux/binfmts.h
15a11c5b 60046@@ -88,6 +88,7 @@ struct linux_binfmt {
58c5fc13
MT
60047 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60048 int (*load_shlib)(struct file *);
ae4e228f 60049 int (*core_dump)(struct coredump_params *cprm);
58c5fc13
MT
60050+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60051 unsigned long min_coredump; /* minimal dump size */
58c5fc13 60052 };
16454cff 60053
fe2de317
MT
60054diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60055index 7fbaa91..5e6a460 100644
60056--- a/include/linux/blkdev.h
60057+++ b/include/linux/blkdev.h
6e9df6a3 60058@@ -1321,7 +1321,7 @@ struct block_device_operations {
57199397 60059 /* this callback is with swap_lock and sometimes page table lock held */
15a11c5b
MT
60060 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60061 struct module *owner;
60062-};
60063+} __do_const;
ae4e228f
MT
60064
60065 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
15a11c5b 60066 unsigned long);
fe2de317
MT
60067diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60068index 8e9e4bc..88bd457 100644
60069--- a/include/linux/blktrace_api.h
60070+++ b/include/linux/blktrace_api.h
6e9df6a3 60071@@ -162,7 +162,7 @@ struct blk_trace {
8308f9c9
MT
60072 struct dentry *dir;
60073 struct dentry *dropped_file;
60074 struct dentry *msg_file;
60075- atomic_t dropped;
60076+ atomic_unchecked_t dropped;
60077 };
60078
60079 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
fe2de317
MT
60080diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60081index 83195fb..0b0f77d 100644
60082--- a/include/linux/byteorder/little_endian.h
60083+++ b/include/linux/byteorder/little_endian.h
bc901d79
MT
60084@@ -42,51 +42,51 @@
60085
60086 static inline __le64 __cpu_to_le64p(const __u64 *p)
60087 {
60088- return (__force __le64)*p;
60089+ return (__force const __le64)*p;
60090 }
60091 static inline __u64 __le64_to_cpup(const __le64 *p)
60092 {
60093- return (__force __u64)*p;
60094+ return (__force const __u64)*p;
60095 }
60096 static inline __le32 __cpu_to_le32p(const __u32 *p)
60097 {
60098- return (__force __le32)*p;
60099+ return (__force const __le32)*p;
60100 }
60101 static inline __u32 __le32_to_cpup(const __le32 *p)
60102 {
60103- return (__force __u32)*p;
60104+ return (__force const __u32)*p;
60105 }
60106 static inline __le16 __cpu_to_le16p(const __u16 *p)
60107 {
60108- return (__force __le16)*p;
60109+ return (__force const __le16)*p;
60110 }
60111 static inline __u16 __le16_to_cpup(const __le16 *p)
60112 {
60113- return (__force __u16)*p;
60114+ return (__force const __u16)*p;
60115 }
60116 static inline __be64 __cpu_to_be64p(const __u64 *p)
60117 {
60118- return (__force __be64)__swab64p(p);
60119+ return (__force const __be64)__swab64p(p);
60120 }
60121 static inline __u64 __be64_to_cpup(const __be64 *p)
60122 {
60123- return __swab64p((__u64 *)p);
60124+ return __swab64p((const __u64 *)p);
60125 }
60126 static inline __be32 __cpu_to_be32p(const __u32 *p)
60127 {
60128- return (__force __be32)__swab32p(p);
60129+ return (__force const __be32)__swab32p(p);
60130 }
60131 static inline __u32 __be32_to_cpup(const __be32 *p)
60132 {
60133- return __swab32p((__u32 *)p);
60134+ return __swab32p((const __u32 *)p);
60135 }
60136 static inline __be16 __cpu_to_be16p(const __u16 *p)
60137 {
60138- return (__force __be16)__swab16p(p);
60139+ return (__force const __be16)__swab16p(p);
60140 }
60141 static inline __u16 __be16_to_cpup(const __be16 *p)
60142 {
60143- return __swab16p((__u16 *)p);
60144+ return __swab16p((const __u16 *)p);
60145 }
60146 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60147 #define __le64_to_cpus(x) do { (void)(x); } while (0)
fe2de317
MT
60148diff --git a/include/linux/cache.h b/include/linux/cache.h
60149index 4c57065..4307975 100644
60150--- a/include/linux/cache.h
60151+++ b/include/linux/cache.h
58c5fc13
MT
60152@@ -16,6 +16,10 @@
60153 #define __read_mostly
60154 #endif
60155
60156+#ifndef __read_only
60157+#define __read_only __read_mostly
60158+#endif
60159+
60160 #ifndef ____cacheline_aligned
60161 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60162 #endif
fe2de317
MT
60163diff --git a/include/linux/capability.h b/include/linux/capability.h
60164index c421123..e343179 100644
60165--- a/include/linux/capability.h
60166+++ b/include/linux/capability.h
66a7e928
MT
60167@@ -547,6 +547,9 @@ extern bool capable(int cap);
60168 extern bool ns_capable(struct user_namespace *ns, int cap);
60169 extern bool task_ns_capable(struct task_struct *t, int cap);
60170 extern bool nsown_capable(int cap);
60171+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
60172+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60173+extern bool capable_nolog(int cap);
58c5fc13
MT
60174
60175 /* audit system wants to get cap info from files as well */
66a7e928 60176 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
fe2de317
MT
60177diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60178index 04ffb2e..6799180 100644
60179--- a/include/linux/cleancache.h
60180+++ b/include/linux/cleancache.h
15a11c5b
MT
60181@@ -31,7 +31,7 @@ struct cleancache_ops {
60182 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
60183 void (*flush_inode)(int, struct cleancache_filekey);
60184 void (*flush_fs)(int);
60185-};
60186+} __no_const;
60187
60188 extern struct cleancache_ops
60189 cleancache_register_ops(struct cleancache_ops *ops);
fe2de317
MT
60190diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60191index dfadc96..c0e70c1 100644
60192--- a/include/linux/compiler-gcc4.h
60193+++ b/include/linux/compiler-gcc4.h
15a11c5b
MT
60194@@ -31,6 +31,12 @@
60195
60196
60197 #if __GNUC_MINOR__ >= 5
60198+
60199+#ifdef CONSTIFY_PLUGIN
60200+#define __no_const __attribute__((no_const))
60201+#define __do_const __attribute__((do_const))
60202+#endif
60203+
60204 /*
60205 * Mark a position in code as unreachable. This can be used to
60206 * suppress control flow warnings after asm blocks that transfer
60207@@ -46,6 +52,11 @@
66a7e928 60208 #define __noclone __attribute__((__noclone__))
57199397 60209
ae4e228f 60210 #endif
66a7e928 60211+
58c5fc13
MT
60212+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60213+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60214+#define __bos0(ptr) __bos((ptr), 0)
60215+#define __bos1(ptr) __bos((ptr), 1)
60216 #endif
ae4e228f
MT
60217
60218 #if __GNUC_MINOR__ > 0
fe2de317
MT
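Besides the constify-plugin markers __no_const/__do_const, the gcc4 hunk above introduces __alloc_size() and the __bos*() wrappers around __builtin_object_size(), which later parts of the patch use to bound copies into objects whose size the compiler can prove. A small sketch of the idea, assuming nothing beyond the builtin itself; the macro and function names are illustrative:

	#include <string.h>

	#define example_bos0(ptr)	__builtin_object_size((ptr), 0)

	void example_copy_to(char *dst, const char *src, size_t n)
	{
		size_t dst_size = example_bos0(dst);	/* (size_t)-1 when the size is unknown */

		if (dst_size != (size_t)-1 && n > dst_size)
			return;				/* a fortified kernel helper would warn or refuse here */
		memcpy(dst, src, n);
	}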
60219diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60220index 320d6c9..8573a1c 100644
60221--- a/include/linux/compiler.h
60222+++ b/include/linux/compiler.h
6e9df6a3
MT
60223@@ -5,31 +5,62 @@
60224
60225 #ifdef __CHECKER__
60226 # define __user __attribute__((noderef, address_space(1)))
60227+# define __force_user __force __user
60228 # define __kernel __attribute__((address_space(0)))
60229+# define __force_kernel __force __kernel
60230 # define __safe __attribute__((safe))
60231 # define __force __attribute__((force))
60232 # define __nocast __attribute__((nocast))
60233 # define __iomem __attribute__((noderef, address_space(2)))
60234+# define __force_iomem __force __iomem
60235 # define __acquires(x) __attribute__((context(x,0,1)))
60236 # define __releases(x) __attribute__((context(x,1,0)))
60237 # define __acquire(x) __context__(x,1)
60238 # define __release(x) __context__(x,-1)
60239 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60240 # define __percpu __attribute__((noderef, address_space(3)))
60241+# define __force_percpu __force __percpu
60242 #ifdef CONFIG_SPARSE_RCU_POINTER
60243 # define __rcu __attribute__((noderef, address_space(4)))
60244+# define __force_rcu __force __rcu
60245 #else
60246 # define __rcu
60247+# define __force_rcu
60248 #endif
60249 extern void __chk_user_ptr(const volatile void __user *);
60250 extern void __chk_io_ptr(const volatile void __iomem *);
60251+#elif defined(CHECKER_PLUGIN)
60252+//# define __user
60253+//# define __force_user
60254+//# define __kernel
60255+//# define __force_kernel
60256+# define __safe
60257+# define __force
60258+# define __nocast
60259+# define __iomem
60260+# define __force_iomem
60261+# define __chk_user_ptr(x) (void)0
60262+# define __chk_io_ptr(x) (void)0
60263+# define __builtin_warning(x, y...) (1)
60264+# define __acquires(x)
60265+# define __releases(x)
60266+# define __acquire(x) (void)0
60267+# define __release(x) (void)0
60268+# define __cond_lock(x,c) (c)
60269+# define __percpu
60270+# define __force_percpu
60271+# define __rcu
60272+# define __force_rcu
60273 #else
60274 # define __user
60275+# define __force_user
60276 # define __kernel
60277+# define __force_kernel
60278 # define __safe
60279 # define __force
60280 # define __nocast
60281 # define __iomem
60282+# define __force_iomem
60283 # define __chk_user_ptr(x) (void)0
60284 # define __chk_io_ptr(x) (void)0
60285 # define __builtin_warning(x, y...) (1)
fe2de317 60286@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
6e9df6a3
MT
60287 # define __release(x) (void)0
60288 # define __cond_lock(x,c) (c)
60289 # define __percpu
60290+# define __force_percpu
60291 # define __rcu
60292+# define __force_rcu
60293 #endif
60294
60295 #ifdef __KERNEL__
fe2de317 60296@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
15a11c5b
MT
60297 # define __attribute_const__ /* unimplemented */
60298 #endif
60299
60300+#ifndef __no_const
60301+# define __no_const
60302+#endif
60303+
60304+#ifndef __do_const
60305+# define __do_const
60306+#endif
60307+
60308 /*
60309 * Tell gcc if a function is cold. The compiler will assume any path
60310 * directly leading to the call is unlikely.
fe2de317 60311@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58c5fc13
MT
60312 #define __cold
60313 #endif
60314
60315+#ifndef __alloc_size
15a11c5b 60316+#define __alloc_size(...)
58c5fc13
MT
60317+#endif
60318+
60319+#ifndef __bos
15a11c5b 60320+#define __bos(ptr, arg)
58c5fc13
MT
60321+#endif
60322+
60323+#ifndef __bos0
15a11c5b 60324+#define __bos0(ptr)
58c5fc13
MT
60325+#endif
60326+
60327+#ifndef __bos1
15a11c5b 60328+#define __bos1(ptr)
58c5fc13
MT
60329+#endif
60330+
60331 /* Simple shorthand for a section definition */
60332 #ifndef __section
60333 # define __section(S) __attribute__ ((__section__(#S)))
fe2de317 60334@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
bc901d79
MT
60335 * use is to mediate communication between process-level code and irq/NMI
60336 * handlers, all running on the same CPU.
60337 */
60338-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60339+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60340+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60341
60342 #endif /* __LINUX_COMPILER_H */
fe2de317
MT
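The compiler.h hunk above also redefines ACCESS_ONCE() to go through a const-qualified volatile pointer and adds ACCESS_ONCE_RW() for the write side, so a stray write through the read macro no longer compiles. A minimal sketch of the resulting usage; the flag and functions are illustrative, and the cpuset.h hunk that follows shows an in-tree conversion:

	static int example_flag;

	static int example_read(void)
	{
		return ACCESS_ONCE(example_flag);	/* reads are unchanged */
	}

	static void example_write(int v)
	{
		ACCESS_ONCE_RW(example_flag) = v;	/* assigning through ACCESS_ONCE() would now
							   be rejected as a write to a const lvalue */
	}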
60343diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
60344index e9eaec5..bfeb9bb 100644
60345--- a/include/linux/cpuset.h
60346+++ b/include/linux/cpuset.h
60347@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
bc901d79
MT
60348 * nodemask.
60349 */
60350 smp_mb();
60351- --ACCESS_ONCE(current->mems_allowed_change_disable);
60352+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
60353 }
60354
60355 static inline void set_mems_allowed(nodemask_t nodemask)
fe2de317
MT
60356diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60357index e5e468e..f079672 100644
60358--- a/include/linux/crypto.h
60359+++ b/include/linux/crypto.h
15a11c5b
MT
60360@@ -361,7 +361,7 @@ struct cipher_tfm {
60361 const u8 *key, unsigned int keylen);
60362 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60363 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60364-};
60365+} __no_const;
60366
60367 struct hash_tfm {
60368 int (*init)(struct hash_desc *desc);
60369@@ -382,13 +382,13 @@ struct compress_tfm {
60370 int (*cot_decompress)(struct crypto_tfm *tfm,
60371 const u8 *src, unsigned int slen,
60372 u8 *dst, unsigned int *dlen);
60373-};
60374+} __no_const;
60375
60376 struct rng_tfm {
60377 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60378 unsigned int dlen);
60379 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60380-};
60381+} __no_const;
60382
60383 #define crt_ablkcipher crt_u.ablkcipher
60384 #define crt_aead crt_u.aead
fe2de317
MT
60385diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60386index 7925bf0..d5143d2 100644
60387--- a/include/linux/decompress/mm.h
60388+++ b/include/linux/decompress/mm.h
16454cff 60389@@ -77,7 +77,7 @@ static void free(void *where)
58c5fc13
MT
60390 * warnings when not needed (indeed large_malloc / large_free are not
60391 * needed by inflate */
60392
60393-#define malloc(a) kmalloc(a, GFP_KERNEL)
60394+#define malloc(a) kmalloc((a), GFP_KERNEL)
60395 #define free(a) kfree(a)
60396
60397 #define large_malloc(a) vmalloc(a)
fe2de317
MT
60398diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60399index 347fdc3..cd01657 100644
60400--- a/include/linux/dma-mapping.h
60401+++ b/include/linux/dma-mapping.h
6e9df6a3 60402@@ -42,7 +42,7 @@ struct dma_map_ops {
15a11c5b
MT
60403 int (*dma_supported)(struct device *dev, u64 mask);
60404 int (*set_dma_mask)(struct device *dev, u64 mask);
60405 int is_phys;
60406-};
60407+} __do_const;
ae4e228f
MT
60408
60409 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
15a11c5b 60410
fe2de317
MT
60411diff --git a/include/linux/efi.h b/include/linux/efi.h
60412index 2362a0b..cfaf8fcc 100644
60413--- a/include/linux/efi.h
60414+++ b/include/linux/efi.h
6e9df6a3 60415@@ -446,7 +446,7 @@ struct efivar_operations {
15a11c5b
MT
60416 efi_get_variable_t *get_variable;
60417 efi_get_next_variable_t *get_next_variable;
60418 efi_set_variable_t *set_variable;
60419-};
60420+} __no_const;
60421
60422 struct efivars {
60423 /*
fe2de317
MT
60424diff --git a/include/linux/elf.h b/include/linux/elf.h
60425index 110821c..cb14c08 100644
60426--- a/include/linux/elf.h
60427+++ b/include/linux/elf.h
58c5fc13
MT
60428@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
60429 #define PT_GNU_EH_FRAME 0x6474e550
60430
60431 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60432+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60433+
60434+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60435+
60436+/* Constants for the e_flags field */
60437+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60438+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60439+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60440+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60441+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60442+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60443
df50ba0c
MT
60444 /*
60445 * Extended Numbering
60446@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58c5fc13
MT
60447 #define DT_DEBUG 21
60448 #define DT_TEXTREL 22
60449 #define DT_JMPREL 23
60450+#define DT_FLAGS 30
60451+ #define DF_TEXTREL 0x00000004
60452 #define DT_ENCODING 32
60453 #define OLD_DT_LOOS 0x60000000
60454 #define DT_LOOS 0x6000000d
df50ba0c 60455@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58c5fc13
MT
60456 #define PF_W 0x2
60457 #define PF_X 0x1
60458
60459+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60460+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60461+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60462+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60463+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60464+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60465+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60466+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60467+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60468+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60469+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60470+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60471+
60472 typedef struct elf32_phdr{
60473 Elf32_Word p_type;
60474 Elf32_Off p_offset;
df50ba0c 60475@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58c5fc13
MT
60476 #define EI_OSABI 7
60477 #define EI_PAD 8
60478
60479+#define EI_PAX 14
60480+
60481 #define ELFMAG0 0x7f /* EI_MAG */
60482 #define ELFMAG1 'E'
60483 #define ELFMAG2 'L'
15a11c5b 60484@@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
58c5fc13
MT
60485 #define elf_note elf32_note
60486 #define elf_addr_t Elf32_Off
df50ba0c 60487 #define Elf_Half Elf32_Half
58c5fc13
MT
60488+#define elf_dyn Elf32_Dyn
60489
60490 #else
60491
15a11c5b 60492@@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
58c5fc13
MT
60493 #define elf_note elf64_note
60494 #define elf_addr_t Elf64_Off
df50ba0c 60495 #define Elf_Half Elf64_Half
58c5fc13
MT
60496+#define elf_dyn Elf64_Dyn
60497
60498 #endif
60499
fe2de317
MT
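The elf.h hunk above defines PT_PAX_FLAGS plus paired enable/disable bits (PF_PAGEEXEC/PF_NOPAGEEXEC and friends) carried in p_flags. A hedged sketch of how a loader could interpret one such pair, with a default of "enabled" that an explicit opt-out overrides; EXAMPLE_PAGEEXEC and the function are illustrative and not taken from the patch:

	#include <linux/elf.h>

	#define EXAMPLE_PAGEEXEC	0x1	/* illustrative internal flag */

	static unsigned long example_parse_pax_phdr(const struct elf_phdr *phdr)
	{
		unsigned long pax_flags = EXAMPLE_PAGEEXEC;	/* feature assumed on by default */

		if (phdr->p_flags & PF_NOPAGEEXEC)		/* explicit opt-out */
			pax_flags &= ~EXAMPLE_PAGEEXEC;
		if (phdr->p_flags & PF_PAGEEXEC)		/* explicit opt-in */
			pax_flags |= EXAMPLE_PAGEEXEC;

		return pax_flags;
	}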
60500diff --git a/include/linux/filter.h b/include/linux/filter.h
60501index 741956f..f02f482 100644
60502--- a/include/linux/filter.h
60503+++ b/include/linux/filter.h
60504@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
883a9837
MT
60505
60506 struct sk_buff;
60507 struct sock;
60508+struct bpf_jit_work;
60509
60510 struct sk_filter
60511 {
60512@@ -141,6 +142,9 @@ struct sk_filter
60513 unsigned int len; /* Number of filter blocks */
60514 unsigned int (*bpf_func)(const struct sk_buff *skb,
60515 const struct sock_filter *filter);
60516+#ifdef CONFIG_BPF_JIT
60517+ struct bpf_jit_work *work;
60518+#endif
60519 struct rcu_head rcu;
60520 struct sock_filter insns[0];
60521 };
fe2de317
MT
60522diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60523index 84ccf8e..2e9b14c 100644
60524--- a/include/linux/firewire.h
60525+++ b/include/linux/firewire.h
15a11c5b
MT
60526@@ -428,7 +428,7 @@ struct fw_iso_context {
60527 union {
60528 fw_iso_callback_t sc;
60529 fw_iso_mc_callback_t mc;
60530- } callback;
60531+ } __no_const callback;
60532 void *callback_data;
66a7e928 60533 };
15a11c5b 60534
fe2de317
MT
60535diff --git a/include/linux/fs.h b/include/linux/fs.h
60536index 277f497..9be66a4 100644
60537--- a/include/linux/fs.h
60538+++ b/include/linux/fs.h
60539@@ -1588,7 +1588,8 @@ struct file_operations {
60540 int (*setlease)(struct file *, long, struct file_lock **);
60541 long (*fallocate)(struct file *file, int mode, loff_t offset,
60542 loff_t len);
60543-};
60544+} __do_const;
60545+typedef struct file_operations __no_const file_operations_no_const;
60546
60547 struct inode_operations {
60548 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60549diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60550index 003dc0f..3c4ea97 100644
60551--- a/include/linux/fs_struct.h
60552+++ b/include/linux/fs_struct.h
60553@@ -6,7 +6,7 @@
60554 #include <linux/seqlock.h>
60555
60556 struct fs_struct {
60557- int users;
60558+ atomic_t users;
60559 spinlock_t lock;
60560 seqcount_t seq;
60561 int umask;
60562diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60563index af095b5..cf1220c 100644
60564--- a/include/linux/fscache-cache.h
60565+++ b/include/linux/fscache-cache.h
15a11c5b
MT
60566@@ -102,7 +102,7 @@ struct fscache_operation {
60567 fscache_operation_release_t release;
8308f9c9
MT
60568 };
60569
60570-extern atomic_t fscache_op_debug_id;
60571+extern atomic_unchecked_t fscache_op_debug_id;
60572 extern void fscache_op_work_func(struct work_struct *work);
60573
60574 extern void fscache_enqueue_operation(struct fscache_operation *);
fe2de317 60575@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
8308f9c9
MT
60576 {
60577 INIT_WORK(&op->work, fscache_op_work_func);
60578 atomic_set(&op->usage, 1);
60579- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60580+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60581 op->processor = processor;
60582 op->release = release;
60583 INIT_LIST_HEAD(&op->pend_link);
fe2de317
MT
60584diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60585index 2a53f10..0187fdf 100644
60586--- a/include/linux/fsnotify.h
60587+++ b/include/linux/fsnotify.h
60588@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
15a11c5b
MT
60589 */
60590 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60591 {
60592- return kstrdup(name, GFP_KERNEL);
60593+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60594 }
ae4e228f
MT
60595
60596 /*
fe2de317
MT
60597diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60598index 96efa67..1261547 100644
60599--- a/include/linux/ftrace_event.h
60600+++ b/include/linux/ftrace_event.h
6e9df6a3 60601@@ -97,7 +97,7 @@ struct trace_event_functions {
15a11c5b
MT
60602 trace_print_func raw;
60603 trace_print_func hex;
60604 trace_print_func binary;
60605-};
60606+} __no_const;
60607
60608 struct trace_event {
60609 struct hlist_node node;
fe2de317 60610@@ -252,7 +252,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
66a7e928
MT
60611 extern int trace_add_event_call(struct ftrace_event_call *call);
60612 extern void trace_remove_event_call(struct ftrace_event_call *call);
60613
60614-#define is_signed_type(type) (((type)(-1)) < 0)
60615+#define is_signed_type(type) (((type)(-1)) < (type)1)
60616
60617 int trace_set_clr_event(const char *system, const char *event, int set);
60618
fe2de317
MT
60619diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60620index 02fa469..a15f279 100644
60621--- a/include/linux/genhd.h
60622+++ b/include/linux/genhd.h
15a11c5b 60623@@ -184,7 +184,7 @@ struct gendisk {
16454cff 60624 struct kobject *slave_dir;
58c5fc13
MT
60625
60626 struct timer_rand_state *random;
58c5fc13
MT
60627- atomic_t sync_io; /* RAID */
60628+ atomic_unchecked_t sync_io; /* RAID */
16454cff 60629 struct disk_events *ev;
58c5fc13
MT
60630 #ifdef CONFIG_BLK_DEV_INTEGRITY
60631 struct blk_integrity *integrity;
fe2de317
MT
60632diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60633new file mode 100644
60634index 0000000..0dc3943
60635--- /dev/null
60636+++ b/include/linux/gracl.h
bc901d79 60637@@ -0,0 +1,317 @@
58c5fc13
MT
60638+#ifndef GR_ACL_H
60639+#define GR_ACL_H
60640+
60641+#include <linux/grdefs.h>
60642+#include <linux/resource.h>
60643+#include <linux/capability.h>
60644+#include <linux/dcache.h>
60645+#include <asm/resource.h>
60646+
60647+/* Major status information */
60648+
16454cff
MT
60649+#define GR_VERSION "grsecurity 2.2.2"
60650+#define GRSECURITY_VERSION 0x2202
58c5fc13
MT
60651+
60652+enum {
60653+ GR_SHUTDOWN = 0,
60654+ GR_ENABLE = 1,
60655+ GR_SPROLE = 2,
60656+ GR_RELOAD = 3,
60657+ GR_SEGVMOD = 4,
60658+ GR_STATUS = 5,
60659+ GR_UNSPROLE = 6,
60660+ GR_PASSSET = 7,
60661+ GR_SPROLEPAM = 8,
60662+};
60663+
60664+/* Password setup definitions
60665+ * kernel/grhash.c */
60666+enum {
60667+ GR_PW_LEN = 128,
60668+ GR_SALT_LEN = 16,
60669+ GR_SHA_LEN = 32,
60670+};
60671+
60672+enum {
60673+ GR_SPROLE_LEN = 64,
60674+};
60675+
bc901d79
MT
60676+enum {
60677+ GR_NO_GLOB = 0,
60678+ GR_REG_GLOB,
60679+ GR_CREATE_GLOB
60680+};
60681+
58c5fc13
MT
60682+#define GR_NLIMITS 32
60683+
60684+/* Begin Data Structures */
60685+
60686+struct sprole_pw {
60687+ unsigned char *rolename;
60688+ unsigned char salt[GR_SALT_LEN];
60689+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60690+};
60691+
60692+struct name_entry {
60693+ __u32 key;
60694+ ino_t inode;
60695+ dev_t device;
60696+ char *name;
60697+ __u16 len;
60698+ __u8 deleted;
60699+ struct name_entry *prev;
60700+ struct name_entry *next;
60701+};
60702+
60703+struct inodev_entry {
60704+ struct name_entry *nentry;
60705+ struct inodev_entry *prev;
60706+ struct inodev_entry *next;
60707+};
60708+
60709+struct acl_role_db {
60710+ struct acl_role_label **r_hash;
60711+ __u32 r_size;
60712+};
60713+
60714+struct inodev_db {
60715+ struct inodev_entry **i_hash;
60716+ __u32 i_size;
60717+};
60718+
60719+struct name_db {
60720+ struct name_entry **n_hash;
60721+ __u32 n_size;
60722+};
60723+
60724+struct crash_uid {
60725+ uid_t uid;
60726+ unsigned long expires;
60727+};
60728+
60729+struct gr_hash_struct {
60730+ void **table;
60731+ void **nametable;
60732+ void *first;
60733+ __u32 table_size;
60734+ __u32 used_size;
60735+ int type;
60736+};
60737+
60738+/* Userspace Grsecurity ACL data structures */
60739+
60740+struct acl_subject_label {
60741+ char *filename;
60742+ ino_t inode;
60743+ dev_t device;
60744+ __u32 mode;
60745+ kernel_cap_t cap_mask;
60746+ kernel_cap_t cap_lower;
df50ba0c 60747+ kernel_cap_t cap_invert_audit;
58c5fc13
MT
60748+
60749+ struct rlimit res[GR_NLIMITS];
60750+ __u32 resmask;
60751+
60752+ __u8 user_trans_type;
60753+ __u8 group_trans_type;
60754+ uid_t *user_transitions;
60755+ gid_t *group_transitions;
60756+ __u16 user_trans_num;
60757+ __u16 group_trans_num;
60758+
bc901d79 60759+ __u32 sock_families[2];
58c5fc13
MT
60760+ __u32 ip_proto[8];
60761+ __u32 ip_type;
60762+ struct acl_ip_label **ips;
60763+ __u32 ip_num;
60764+ __u32 inaddr_any_override;
60765+
60766+ __u32 crashes;
60767+ unsigned long expires;
60768+
60769+ struct acl_subject_label *parent_subject;
60770+ struct gr_hash_struct *hash;
60771+ struct acl_subject_label *prev;
60772+ struct acl_subject_label *next;
60773+
60774+ struct acl_object_label **obj_hash;
60775+ __u32 obj_hash_size;
60776+ __u16 pax_flags;
60777+};
60778+
60779+struct role_allowed_ip {
60780+ __u32 addr;
60781+ __u32 netmask;
60782+
60783+ struct role_allowed_ip *prev;
60784+ struct role_allowed_ip *next;
60785+};
60786+
60787+struct role_transition {
60788+ char *rolename;
60789+
60790+ struct role_transition *prev;
60791+ struct role_transition *next;
60792+};
60793+
60794+struct acl_role_label {
60795+ char *rolename;
60796+ uid_t uidgid;
60797+ __u16 roletype;
60798+
60799+ __u16 auth_attempts;
60800+ unsigned long expires;
60801+
60802+ struct acl_subject_label *root_label;
60803+ struct gr_hash_struct *hash;
60804+
60805+ struct acl_role_label *prev;
60806+ struct acl_role_label *next;
60807+
60808+ struct role_transition *transitions;
60809+ struct role_allowed_ip *allowed_ips;
60810+ uid_t *domain_children;
60811+ __u16 domain_child_num;
60812+
60813+ struct acl_subject_label **subj_hash;
60814+ __u32 subj_hash_size;
60815+};
60816+
60817+struct user_acl_role_db {
60818+ struct acl_role_label **r_table;
60819+ __u32 num_pointers; /* Number of allocations to track */
60820+ __u32 num_roles; /* Number of roles */
60821+ __u32 num_domain_children; /* Number of domain children */
60822+ __u32 num_subjects; /* Number of subjects */
60823+ __u32 num_objects; /* Number of objects */
60824+};
60825+
60826+struct acl_object_label {
60827+ char *filename;
60828+ ino_t inode;
60829+ dev_t device;
60830+ __u32 mode;
60831+
60832+ struct acl_subject_label *nested;
60833+ struct acl_object_label *globbed;
60834+
60835+ /* next two structures not used */
60836+
60837+ struct acl_object_label *prev;
60838+ struct acl_object_label *next;
60839+};
60840+
60841+struct acl_ip_label {
60842+ char *iface;
60843+ __u32 addr;
60844+ __u32 netmask;
60845+ __u16 low, high;
60846+ __u8 mode;
60847+ __u32 type;
60848+ __u32 proto[8];
60849+
60850+ /* next two structures not used */
60851+
60852+ struct acl_ip_label *prev;
60853+ struct acl_ip_label *next;
60854+};
60855+
60856+struct gr_arg {
60857+ struct user_acl_role_db role_db;
60858+ unsigned char pw[GR_PW_LEN];
60859+ unsigned char salt[GR_SALT_LEN];
60860+ unsigned char sum[GR_SHA_LEN];
60861+ unsigned char sp_role[GR_SPROLE_LEN];
60862+ struct sprole_pw *sprole_pws;
60863+ dev_t segv_device;
60864+ ino_t segv_inode;
60865+ uid_t segv_uid;
60866+ __u16 num_sprole_pws;
60867+ __u16 mode;
60868+};
60869+
60870+struct gr_arg_wrapper {
60871+ struct gr_arg *arg;
60872+ __u32 version;
60873+ __u32 size;
60874+};
60875+
60876+struct subject_map {
60877+ struct acl_subject_label *user;
60878+ struct acl_subject_label *kernel;
60879+ struct subject_map *prev;
60880+ struct subject_map *next;
60881+};
60882+
60883+struct acl_subj_map_db {
60884+ struct subject_map **s_hash;
60885+ __u32 s_size;
60886+};
60887+
60888+/* End Data Structures Section */
60889+
60890+/* Hash functions generated by empirical testing by Brad Spengler
60891+ Makes good use of the low bits of the inode. Generally 0-1 times
60892+ in loop for successful match. 0-3 for unsuccessful match.
60893+ Shift/add algorithm with modulus of table size and an XOR*/
60894+
60895+static __inline__ unsigned int
60896+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60897+{
ae4e228f 60898+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58c5fc13
MT
60899+}
60900+
60901+ static __inline__ unsigned int
60902+shash(const struct acl_subject_label *userp, const unsigned int sz)
60903+{
60904+ return ((const unsigned long)userp % sz);
60905+}
60906+
60907+static __inline__ unsigned int
60908+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60909+{
60910+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60911+}
60912+
60913+static __inline__ unsigned int
60914+nhash(const char *name, const __u16 len, const unsigned int sz)
60915+{
60916+ return full_name_hash((const unsigned char *)name, len) % sz;
60917+}
60918+
ae4e228f
MT
60919+#define FOR_EACH_ROLE_START(role) \
60920+ role = role_list; \
60921+ while (role) {
58c5fc13 60922+
ae4e228f
MT
60923+#define FOR_EACH_ROLE_END(role) \
60924+ role = role->prev; \
58c5fc13
MT
60925+ }
60926+
60927+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60928+ subj = NULL; \
60929+ iter = 0; \
60930+ while (iter < role->subj_hash_size) { \
60931+ if (subj == NULL) \
60932+ subj = role->subj_hash[iter]; \
60933+ if (subj == NULL) { \
60934+ iter++; \
60935+ continue; \
60936+ }
60937+
60938+#define FOR_EACH_SUBJECT_END(subj,iter) \
60939+ subj = subj->next; \
60940+ if (subj == NULL) \
60941+ iter++; \
60942+ }
60943+
60944+
60945+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60946+ subj = role->hash->first; \
60947+ while (subj != NULL) {
60948+
60949+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60950+ subj = subj->next; \
60951+ }
60952+
60953+#endif
60954+
fe2de317
MT
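gracl.h above closes with the hash helpers the RBAC system uses to index its role, subject, filename and inode/device tables. They are cheap shift/XOR mixes reduced modulo the table size: for example, fhash(2, 1, 256) = ((2 + 1) ^ ((2 << 13) + (2 << 23) + (1 << 9))) % 256 = (3 ^ 16794112) % 256 = 3, so that inode/device pair would land in bucket 3. A hedged sketch of how the inodev_db declared above might be probed; the function is illustrative and the chain direction (->next) is an assumption, not taken from the patch:

	static struct inodev_entry *
	example_lookup_inodev(const struct inodev_db *db, const ino_t ino, const dev_t dev)
	{
		unsigned int index = fhash(ino, dev, db->i_size);
		struct inodev_entry *entry = db->i_hash[index];

		/* walk the collision chain until inode and device both match */
		while (entry && (entry->nentry->inode != ino || entry->nentry->device != dev))
			entry = entry->next;

		return entry;
	}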
60955diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60956new file mode 100644
60957index 0000000..323ecf2
60958--- /dev/null
60959+++ b/include/linux/gralloc.h
58c5fc13
MT
60960@@ -0,0 +1,9 @@
60961+#ifndef __GRALLOC_H
60962+#define __GRALLOC_H
60963+
60964+void acl_free_all(void);
60965+int acl_alloc_stack_init(unsigned long size);
60966+void *acl_alloc(unsigned long len);
60967+void *acl_alloc_num(unsigned long num, unsigned long len);
60968+
60969+#endif
fe2de317
MT
60970diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60971new file mode 100644
60972index 0000000..b30e9bc
60973--- /dev/null
60974+++ b/include/linux/grdefs.h
15a11c5b 60975@@ -0,0 +1,140 @@
58c5fc13
MT
60976+#ifndef GRDEFS_H
60977+#define GRDEFS_H
60978+
60979+/* Begin grsecurity status declarations */
60980+
60981+enum {
60982+ GR_READY = 0x01,
60983+ GR_STATUS_INIT = 0x00 // disabled state
60984+};
60985+
60986+/* Begin ACL declarations */
60987+
60988+/* Role flags */
60989+
60990+enum {
60991+ GR_ROLE_USER = 0x0001,
60992+ GR_ROLE_GROUP = 0x0002,
60993+ GR_ROLE_DEFAULT = 0x0004,
60994+ GR_ROLE_SPECIAL = 0x0008,
60995+ GR_ROLE_AUTH = 0x0010,
60996+ GR_ROLE_NOPW = 0x0020,
60997+ GR_ROLE_GOD = 0x0040,
60998+ GR_ROLE_LEARN = 0x0080,
60999+ GR_ROLE_TPE = 0x0100,
61000+ GR_ROLE_DOMAIN = 0x0200,
16454cff
MT
61001+ GR_ROLE_PAM = 0x0400,
61002+ GR_ROLE_PERSIST = 0x0800
58c5fc13
MT
61003+};
61004+
61005+/* ACL Subject and Object mode flags */
61006+enum {
61007+ GR_DELETED = 0x80000000
61008+};
61009+
61010+/* ACL Object-only mode flags */
61011+enum {
61012+ GR_READ = 0x00000001,
61013+ GR_APPEND = 0x00000002,
61014+ GR_WRITE = 0x00000004,
61015+ GR_EXEC = 0x00000008,
61016+ GR_FIND = 0x00000010,
61017+ GR_INHERIT = 0x00000020,
61018+ GR_SETID = 0x00000040,
61019+ GR_CREATE = 0x00000080,
61020+ GR_DELETE = 0x00000100,
61021+ GR_LINK = 0x00000200,
61022+ GR_AUDIT_READ = 0x00000400,
61023+ GR_AUDIT_APPEND = 0x00000800,
61024+ GR_AUDIT_WRITE = 0x00001000,
61025+ GR_AUDIT_EXEC = 0x00002000,
61026+ GR_AUDIT_FIND = 0x00004000,
61027+ GR_AUDIT_INHERIT= 0x00008000,
61028+ GR_AUDIT_SETID = 0x00010000,
61029+ GR_AUDIT_CREATE = 0x00020000,
61030+ GR_AUDIT_DELETE = 0x00040000,
61031+ GR_AUDIT_LINK = 0x00080000,
61032+ GR_PTRACERD = 0x00100000,
61033+ GR_NOPTRACE = 0x00200000,
61034+ GR_SUPPRESS = 0x00400000,
16454cff
MT
61035+ GR_NOLEARN = 0x00800000,
61036+ GR_INIT_TRANSFER= 0x01000000
58c5fc13
MT
61037+};
61038+
61039+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61040+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61041+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61042+
61043+/* ACL subject-only mode flags */
61044+enum {
61045+ GR_KILL = 0x00000001,
61046+ GR_VIEW = 0x00000002,
61047+ GR_PROTECTED = 0x00000004,
61048+ GR_LEARN = 0x00000008,
61049+ GR_OVERRIDE = 0x00000010,
61050+ /* just a placeholder, this mode is only used in userspace */
61051+ GR_DUMMY = 0x00000020,
61052+ GR_PROTSHM = 0x00000040,
61053+ GR_KILLPROC = 0x00000080,
61054+ GR_KILLIPPROC = 0x00000100,
61055+ /* just a placeholder, this mode is only used in userspace */
61056+ GR_NOTROJAN = 0x00000200,
61057+ GR_PROTPROCFD = 0x00000400,
61058+ GR_PROCACCT = 0x00000800,
61059+ GR_RELAXPTRACE = 0x00001000,
61060+ GR_NESTED = 0x00002000,
61061+ GR_INHERITLEARN = 0x00004000,
61062+ GR_PROCFIND = 0x00008000,
61063+ GR_POVERRIDE = 0x00010000,
61064+ GR_KERNELAUTH = 0x00020000,
15a11c5b
MT
61065+ GR_ATSECURE = 0x00040000,
61066+ GR_SHMEXEC = 0x00080000
58c5fc13
MT
61067+};
61068+
61069+enum {
61070+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61071+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61072+ GR_PAX_ENABLE_MPROTECT = 0x0004,
61073+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
61074+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61075+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61076+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61077+ GR_PAX_DISABLE_MPROTECT = 0x0400,
61078+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
61079+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61080+};
61081+
61082+enum {
61083+ GR_ID_USER = 0x01,
61084+ GR_ID_GROUP = 0x02,
61085+};
61086+
61087+enum {
61088+ GR_ID_ALLOW = 0x01,
61089+ GR_ID_DENY = 0x02,
61090+};
61091+
61092+#define GR_CRASH_RES 31
61093+#define GR_UIDTABLE_MAX 500
61094+
61095+/* begin resource learning section */
61096+enum {
61097+ GR_RLIM_CPU_BUMP = 60,
61098+ GR_RLIM_FSIZE_BUMP = 50000,
61099+ GR_RLIM_DATA_BUMP = 10000,
61100+ GR_RLIM_STACK_BUMP = 1000,
61101+ GR_RLIM_CORE_BUMP = 10000,
61102+ GR_RLIM_RSS_BUMP = 500000,
61103+ GR_RLIM_NPROC_BUMP = 1,
61104+ GR_RLIM_NOFILE_BUMP = 5,
61105+ GR_RLIM_MEMLOCK_BUMP = 50000,
61106+ GR_RLIM_AS_BUMP = 500000,
61107+ GR_RLIM_LOCKS_BUMP = 2,
61108+ GR_RLIM_SIGPENDING_BUMP = 5,
61109+ GR_RLIM_MSGQUEUE_BUMP = 10000,
61110+ GR_RLIM_NICE_BUMP = 1,
61111+ GR_RLIM_RTPRIO_BUMP = 1,
61112+ GR_RLIM_RTTIME_BUMP = 1000000
61113+};
61114+
61115+#endif
fe2de317
MT
61116diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61117new file mode 100644
61118index 0000000..60cda84
61119--- /dev/null
61120+++ b/include/linux/grinternal.h
6e9df6a3 61121@@ -0,0 +1,220 @@
58c5fc13
MT
61122+#ifndef __GRINTERNAL_H
61123+#define __GRINTERNAL_H
61124+
61125+#ifdef CONFIG_GRKERNSEC
61126+
61127+#include <linux/fs.h>
61128+#include <linux/mnt_namespace.h>
61129+#include <linux/nsproxy.h>
61130+#include <linux/gracl.h>
61131+#include <linux/grdefs.h>
61132+#include <linux/grmsg.h>
61133+
61134+void gr_add_learn_entry(const char *fmt, ...)
61135+ __attribute__ ((format (printf, 1, 2)));
61136+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61137+ const struct vfsmount *mnt);
61138+__u32 gr_check_create(const struct dentry *new_dentry,
61139+ const struct dentry *parent,
61140+ const struct vfsmount *mnt, const __u32 mode);
61141+int gr_check_protected_task(const struct task_struct *task);
61142+__u32 to_gr_audit(const __u32 reqmode);
61143+int gr_set_acls(const int type);
16454cff 61144+int gr_apply_subject_to_task(struct task_struct *task);
58c5fc13
MT
61145+int gr_acl_is_enabled(void);
61146+char gr_roletype_to_char(void);
61147+
61148+void gr_handle_alertkill(struct task_struct *task);
61149+char *gr_to_filename(const struct dentry *dentry,
61150+ const struct vfsmount *mnt);
61151+char *gr_to_filename1(const struct dentry *dentry,
61152+ const struct vfsmount *mnt);
61153+char *gr_to_filename2(const struct dentry *dentry,
61154+ const struct vfsmount *mnt);
61155+char *gr_to_filename3(const struct dentry *dentry,
61156+ const struct vfsmount *mnt);
61157+
61158+extern int grsec_enable_harden_ptrace;
61159+extern int grsec_enable_link;
61160+extern int grsec_enable_fifo;
61161+extern int grsec_enable_execve;
61162+extern int grsec_enable_shm;
61163+extern int grsec_enable_execlog;
61164+extern int grsec_enable_signal;
ae4e228f 61165+extern int grsec_enable_audit_ptrace;
58c5fc13
MT
61166+extern int grsec_enable_forkfail;
61167+extern int grsec_enable_time;
ae4e228f 61168+extern int grsec_enable_rofs;
58c5fc13 61169+extern int grsec_enable_chroot_shmat;
58c5fc13
MT
61170+extern int grsec_enable_chroot_mount;
61171+extern int grsec_enable_chroot_double;
61172+extern int grsec_enable_chroot_pivot;
61173+extern int grsec_enable_chroot_chdir;
61174+extern int grsec_enable_chroot_chmod;
61175+extern int grsec_enable_chroot_mknod;
61176+extern int grsec_enable_chroot_fchdir;
61177+extern int grsec_enable_chroot_nice;
61178+extern int grsec_enable_chroot_execlog;
61179+extern int grsec_enable_chroot_caps;
61180+extern int grsec_enable_chroot_sysctl;
61181+extern int grsec_enable_chroot_unix;
61182+extern int grsec_enable_tpe;
61183+extern int grsec_tpe_gid;
61184+extern int grsec_enable_tpe_all;
57199397 61185+extern int grsec_enable_tpe_invert;
58c5fc13
MT
61186+extern int grsec_enable_socket_all;
61187+extern int grsec_socket_all_gid;
61188+extern int grsec_enable_socket_client;
61189+extern int grsec_socket_client_gid;
61190+extern int grsec_enable_socket_server;
61191+extern int grsec_socket_server_gid;
61192+extern int grsec_audit_gid;
61193+extern int grsec_enable_group;
61194+extern int grsec_enable_audit_textrel;
6892158b 61195+extern int grsec_enable_log_rwxmaps;
58c5fc13
MT
61196+extern int grsec_enable_mount;
61197+extern int grsec_enable_chdir;
61198+extern int grsec_resource_logging;
ae4e228f
MT
61199+extern int grsec_enable_blackhole;
61200+extern int grsec_lastack_retries;
15a11c5b 61201+extern int grsec_enable_brute;
58c5fc13
MT
61202+extern int grsec_lock;
61203+
61204+extern spinlock_t grsec_alert_lock;
61205+extern unsigned long grsec_alert_wtime;
61206+extern unsigned long grsec_alert_fyet;
61207+
61208+extern spinlock_t grsec_audit_lock;
61209+
61210+extern rwlock_t grsec_exec_file_lock;
61211+
6892158b
MT
61212+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61213+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61214+ (tsk)->exec_file->f_vfsmnt) : "/")
58c5fc13 61215+
6892158b
MT
61216+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61217+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61218+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58c5fc13 61219+
6892158b
MT
61220+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61221+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
61222+ (tsk)->exec_file->f_vfsmnt) : "/")
58c5fc13 61223+
6892158b
MT
61224+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61225+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61226+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58c5fc13 61227+
6892158b 61228+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58c5fc13 61229+
6892158b 61230+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58c5fc13 61231+
6892158b
MT
61232+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61233+ (task)->pid, (cred)->uid, \
61234+ (cred)->euid, (cred)->gid, (cred)->egid, \
58c5fc13 61235+ gr_parent_task_fullpath(task), \
6892158b
MT
61236+ (task)->real_parent->comm, (task)->real_parent->pid, \
61237+ (pcred)->uid, (pcred)->euid, \
61238+ (pcred)->gid, (pcred)->egid
58c5fc13
MT
61239+
61240+#define GR_CHROOT_CAPS {{ \
61241+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61242+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61243+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61244+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61245+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
6e9df6a3
MT
61246+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61247+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58c5fc13
MT
61248+
61249+#define security_learn(normal_msg,args...) \
61250+({ \
61251+ read_lock(&grsec_exec_file_lock); \
61252+ gr_add_learn_entry(normal_msg "\n", ## args); \
61253+ read_unlock(&grsec_exec_file_lock); \
61254+})
61255+
61256+enum {
61257+ GR_DO_AUDIT,
61258+ GR_DONT_AUDIT,
16454cff 61259+ /* used for non-audit messages that we shouldn't kill the task on */
58c5fc13
MT
61260+ GR_DONT_AUDIT_GOOD
61261+};
61262+
61263+enum {
61264+ GR_TTYSNIFF,
61265+ GR_RBAC,
61266+ GR_RBAC_STR,
61267+ GR_STR_RBAC,
61268+ GR_RBAC_MODE2,
61269+ GR_RBAC_MODE3,
61270+ GR_FILENAME,
61271+ GR_SYSCTL_HIDDEN,
61272+ GR_NOARGS,
61273+ GR_ONE_INT,
61274+ GR_ONE_INT_TWO_STR,
61275+ GR_ONE_STR,
61276+ GR_STR_INT,
bc901d79 61277+ GR_TWO_STR_INT,
58c5fc13 61278+ GR_TWO_INT,
71d190be 61279+ GR_TWO_U64,
58c5fc13
MT
61280+ GR_THREE_INT,
61281+ GR_FIVE_INT_TWO_STR,
61282+ GR_TWO_STR,
61283+ GR_THREE_STR,
61284+ GR_FOUR_STR,
61285+ GR_STR_FILENAME,
61286+ GR_FILENAME_STR,
61287+ GR_FILENAME_TWO_INT,
61288+ GR_FILENAME_TWO_INT_STR,
61289+ GR_TEXTREL,
61290+ GR_PTRACE,
61291+ GR_RESOURCE,
61292+ GR_CAP,
61293+ GR_SIG,
61294+ GR_SIG2,
61295+ GR_CRASH1,
61296+ GR_CRASH2,
6892158b
MT
61297+ GR_PSACCT,
61298+ GR_RWXMAP
58c5fc13
MT
61299+};
61300+
61301+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61302+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61303+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61304+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61305+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61306+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61307+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61308+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61309+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61310+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61311+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61312+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61313+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61314+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
71d190be 61315+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58c5fc13
MT
61316+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61317+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61318+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
bc901d79 61319+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58c5fc13
MT
61320+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61321+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61322+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61323+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61324+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61325+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61326+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61327+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61328+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61329+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61330+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61331+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61332+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61333+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61334+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
6892158b 61335+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58c5fc13
MT
61336+
61337+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61338+
61339+#endif
61340+
61341+#endif
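
The enum and the gr_log_* wrapper macros above funnel every grsecurity audit message through the single variadic dispatcher gr_log_varargs(), with the argtypes tag telling it how to pull arguments off the va_list. A minimal user-space sketch of that dispatch pattern follows; the names, tags and formats are illustrative stand-ins, not the kernel implementation.

#include <stdarg.h>
#include <stdio.h>

enum { DEMO_ONE_INT, DEMO_ONE_STR, DEMO_STR_INT };

/* toy stand-in for gr_log_varargs(): one varargs sink, decoded per type tag */
static void demo_log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case DEMO_ONE_INT:
		printf("grsec: %s %d\n", msg, va_arg(ap, int));
		break;
	case DEMO_ONE_STR:
		printf("grsec: %s %s\n", msg, va_arg(ap, const char *));
		break;
	case DEMO_STR_INT: {
		const char *s = va_arg(ap, const char *);
		printf("grsec: %s %s %d\n", msg, s, va_arg(ap, int));
		break;
	}
	}
	va_end(ap);
}

#define demo_log_int(msg, num)		demo_log_varargs(msg, DEMO_ONE_INT, num)
#define demo_log_str_int(msg, s, n)	demo_log_varargs(msg, DEMO_STR_INT, s, n)

int main(void)
{
	demo_log_int("denied resource overstep, limit", 1024);
	demo_log_str_int("denied chdir to", "/root", 42);
	return 0;
}
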
fe2de317
MT
61342diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61343new file mode 100644
61344index 0000000..9d5fd4a
61345--- /dev/null
61346+++ b/include/linux/grmsg.h
71d190be 61347@@ -0,0 +1,108 @@
58c5fc13 61348+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
ae4e228f 61349+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58c5fc13
MT
61350+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61351+#define GR_STOPMOD_MSG "denied modification of module state by "
ae4e228f
MT
61352+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61353+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58c5fc13
MT
61354+#define GR_IOPERM_MSG "denied use of ioperm() by "
61355+#define GR_IOPL_MSG "denied use of iopl() by "
61356+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61357+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61358+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
71d190be 61359+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58c5fc13 61360+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
ae4e228f
MT
61361+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61362+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58c5fc13
MT
61363+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61364+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61365+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61366+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61367+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61368+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61369+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
ae4e228f 61370+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58c5fc13
MT
61371+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61372+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61373+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61374+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61375+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61376+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61377+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61378+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
ae4e228f 61379+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58c5fc13 61380+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58c5fc13
MT
61381+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61382+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
61383+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61384+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61385+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61386+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61387+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61388+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61389+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61390+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61391+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
61392+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61393+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61394+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61395+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
bc901d79 61396+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58c5fc13
MT
61397+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61398+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61399+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61400+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
61401+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61402+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61403+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61404+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61405+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61406+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61407+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61408+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61409+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61410+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61411+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61412+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61413+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61414+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61415+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61416+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61417+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58c5fc13
MT
61418+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61419+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
6892158b 61420+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58c5fc13
MT
61421+#define GR_NICE_CHROOT_MSG "denied priority change by "
61422+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61423+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61424+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61425+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61426+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61427+#define GR_TIME_MSG "time set by "
61428+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61429+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61430+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61431+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
bc901d79 61432+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58c5fc13
MT
61433+#define GR_BIND_MSG "denied bind() by "
61434+#define GR_CONNECT_MSG "denied connect() by "
ae4e228f
MT
61435+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61436+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61437+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58c5fc13
MT
61438+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61439+#define GR_CAP_ACL_MSG "use of %s denied for "
15a11c5b 61440+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
df50ba0c 61441+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58c5fc13
MT
61442+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61443+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61444+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61445+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61446+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61447+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61448+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61449+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
6892158b
MT
61450+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61451+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58c5fc13 61452+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
ae4e228f
MT
61453+#define GR_VM86_MSG "denied use of vm86 by "
61454+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
16454cff 61455+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
fe2de317
MT
61456diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61457new file mode 100644
61458index 0000000..bd25f72
61459--- /dev/null
61460+++ b/include/linux/grsecurity.h
6e9df6a3 61461@@ -0,0 +1,228 @@
58c5fc13
MT
61462+#ifndef GR_SECURITY_H
61463+#define GR_SECURITY_H
61464+#include <linux/fs.h>
61465+#include <linux/fs_struct.h>
61466+#include <linux/binfmts.h>
61467+#include <linux/gracl.h>
61468+
61469+/* notify of brain-dead configs */
15a11c5b
MT
61470+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61471+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61472+#endif
58c5fc13
MT
61473+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61474+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61475+#endif
61476+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
61477+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
61478+#endif
61479+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
61480+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
61481+#endif
61482+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61483+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61484+#endif
61485+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61486+#error "CONFIG_PAX enabled, but no PaX options are enabled."
61487+#endif
61488+
15a11c5b
MT
61489+#include <linux/compat.h>
61490+
61491+struct user_arg_ptr {
61492+#ifdef CONFIG_COMPAT
61493+ bool is_compat;
61494+#endif
61495+ union {
61496+ const char __user *const __user *native;
61497+#ifdef CONFIG_COMPAT
61498+ compat_uptr_t __user *compat;
61499+#endif
61500+ } ptr;
61501+};
61502+
71d190be 61503+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58c5fc13 61504+void gr_handle_brute_check(void);
71d190be
MT
61505+void gr_handle_kernel_exploit(void);
61506+int gr_process_user_ban(void);
58c5fc13
MT
61507+
61508+char gr_roletype_to_char(void);
61509+
bc901d79
MT
61510+int gr_acl_enable_at_secure(void);
61511+
58c5fc13
MT
61512+int gr_check_user_change(int real, int effective, int fs);
61513+int gr_check_group_change(int real, int effective, int fs);
61514+
61515+void gr_del_task_from_ip_table(struct task_struct *p);
61516+
61517+int gr_pid_is_chrooted(struct task_struct *p);
57199397 61518+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58c5fc13
MT
61519+int gr_handle_chroot_nice(void);
61520+int gr_handle_chroot_sysctl(const int op);
61521+int gr_handle_chroot_setpriority(struct task_struct *p,
61522+ const int niceval);
61523+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61524+int gr_handle_chroot_chroot(const struct dentry *dentry,
61525+ const struct vfsmount *mnt);
58c5fc13
MT
61526+void gr_handle_chroot_chdir(struct path *path);
61527+int gr_handle_chroot_chmod(const struct dentry *dentry,
61528+ const struct vfsmount *mnt, const int mode);
61529+int gr_handle_chroot_mknod(const struct dentry *dentry,
61530+ const struct vfsmount *mnt, const int mode);
61531+int gr_handle_chroot_mount(const struct dentry *dentry,
61532+ const struct vfsmount *mnt,
61533+ const char *dev_name);
61534+int gr_handle_chroot_pivot(void);
15a11c5b 61535+int gr_handle_chroot_unix(const pid_t pid);
58c5fc13
MT
61536+
61537+int gr_handle_rawio(const struct inode *inode);
58c5fc13
MT
61538+
61539+void gr_handle_ioperm(void);
61540+void gr_handle_iopl(void);
61541+
61542+int gr_tpe_allow(const struct file *file);
61543+
df50ba0c
MT
61544+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61545+void gr_clear_chroot_entries(struct task_struct *task);
58c5fc13
MT
61546+
61547+void gr_log_forkfail(const int retval);
61548+void gr_log_timechange(void);
61549+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61550+void gr_log_chdir(const struct dentry *dentry,
61551+ const struct vfsmount *mnt);
61552+void gr_log_chroot_exec(const struct dentry *dentry,
61553+ const struct vfsmount *mnt);
15a11c5b 61554+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58c5fc13
MT
61555+void gr_log_remount(const char *devname, const int retval);
61556+void gr_log_unmount(const char *devname, const int retval);
61557+void gr_log_mount(const char *from, const char *to, const int retval);
61558+void gr_log_textrel(struct vm_area_struct *vma);
6892158b
MT
61559+void gr_log_rwxmmap(struct file *file);
61560+void gr_log_rwxmprotect(struct file *file);
58c5fc13
MT
61561+
61562+int gr_handle_follow_link(const struct inode *parent,
61563+ const struct inode *inode,
61564+ const struct dentry *dentry,
61565+ const struct vfsmount *mnt);
61566+int gr_handle_fifo(const struct dentry *dentry,
61567+ const struct vfsmount *mnt,
61568+ const struct dentry *dir, const int flag,
61569+ const int acc_mode);
61570+int gr_handle_hardlink(const struct dentry *dentry,
61571+ const struct vfsmount *mnt,
61572+ struct inode *inode,
61573+ const int mode, const char *to);
61574+
61575+int gr_is_capable(const int cap);
61576+int gr_is_capable_nolog(const int cap);
61577+void gr_learn_resource(const struct task_struct *task, const int limit,
61578+ const unsigned long wanted, const int gt);
61579+void gr_copy_label(struct task_struct *tsk);
61580+void gr_handle_crash(struct task_struct *task, const int sig);
61581+int gr_handle_signal(const struct task_struct *p, const int sig);
61582+int gr_check_crash_uid(const uid_t uid);
61583+int gr_check_protected_task(const struct task_struct *task);
57199397 61584+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58c5fc13
MT
61585+int gr_acl_handle_mmap(const struct file *file,
61586+ const unsigned long prot);
61587+int gr_acl_handle_mprotect(const struct file *file,
61588+ const unsigned long prot);
61589+int gr_check_hidden_task(const struct task_struct *tsk);
61590+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61591+ const struct vfsmount *mnt);
61592+__u32 gr_acl_handle_utime(const struct dentry *dentry,
61593+ const struct vfsmount *mnt);
61594+__u32 gr_acl_handle_access(const struct dentry *dentry,
61595+ const struct vfsmount *mnt, const int fmode);
61596+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
61597+ const struct vfsmount *mnt, mode_t mode);
61598+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61599+ const struct vfsmount *mnt, mode_t mode);
61600+__u32 gr_acl_handle_chown(const struct dentry *dentry,
61601+ const struct vfsmount *mnt);
bc901d79
MT
61602+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61603+ const struct vfsmount *mnt);
58c5fc13
MT
61604+int gr_handle_ptrace(struct task_struct *task, const long request);
61605+int gr_handle_proc_ptrace(struct task_struct *task);
61606+__u32 gr_acl_handle_execve(const struct dentry *dentry,
61607+ const struct vfsmount *mnt);
61608+int gr_check_crash_exec(const struct file *filp);
61609+int gr_acl_is_enabled(void);
61610+void gr_set_kernel_label(struct task_struct *task);
61611+void gr_set_role_label(struct task_struct *task, const uid_t uid,
61612+ const gid_t gid);
61613+int gr_set_proc_label(const struct dentry *dentry,
61614+ const struct vfsmount *mnt,
61615+ const int unsafe_share);
61616+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61617+ const struct vfsmount *mnt);
61618+__u32 gr_acl_handle_open(const struct dentry *dentry,
6e9df6a3 61619+ const struct vfsmount *mnt, int acc_mode);
58c5fc13
MT
61620+__u32 gr_acl_handle_creat(const struct dentry *dentry,
61621+ const struct dentry *p_dentry,
6e9df6a3
MT
61622+ const struct vfsmount *p_mnt,
61623+ int open_flags, int acc_mode, const int imode);
58c5fc13
MT
61624+void gr_handle_create(const struct dentry *dentry,
61625+ const struct vfsmount *mnt);
6e9df6a3
MT
61626+void gr_handle_proc_create(const struct dentry *dentry,
61627+ const struct inode *inode);
58c5fc13
MT
61628+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61629+ const struct dentry *parent_dentry,
61630+ const struct vfsmount *parent_mnt,
61631+ const int mode);
61632+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61633+ const struct dentry *parent_dentry,
61634+ const struct vfsmount *parent_mnt);
61635+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61636+ const struct vfsmount *mnt);
61637+void gr_handle_delete(const ino_t ino, const dev_t dev);
61638+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61639+ const struct vfsmount *mnt);
61640+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61641+ const struct dentry *parent_dentry,
61642+ const struct vfsmount *parent_mnt,
61643+ const char *from);
61644+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61645+ const struct dentry *parent_dentry,
61646+ const struct vfsmount *parent_mnt,
61647+ const struct dentry *old_dentry,
61648+ const struct vfsmount *old_mnt, const char *to);
61649+int gr_acl_handle_rename(struct dentry *new_dentry,
61650+ struct dentry *parent_dentry,
61651+ const struct vfsmount *parent_mnt,
61652+ struct dentry *old_dentry,
61653+ struct inode *old_parent_inode,
61654+ struct vfsmount *old_mnt, const char *newname);
61655+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61656+ struct dentry *old_dentry,
61657+ struct dentry *new_dentry,
61658+ struct vfsmount *mnt, const __u8 replace);
61659+__u32 gr_check_link(const struct dentry *new_dentry,
61660+ const struct dentry *parent_dentry,
61661+ const struct vfsmount *parent_mnt,
61662+ const struct dentry *old_dentry,
61663+ const struct vfsmount *old_mnt);
61664+int gr_acl_handle_filldir(const struct file *file, const char *name,
61665+ const unsigned int namelen, const ino_t ino);
61666+
61667+__u32 gr_acl_handle_unix(const struct dentry *dentry,
61668+ const struct vfsmount *mnt);
61669+void gr_acl_handle_exit(void);
61670+void gr_acl_handle_psacct(struct task_struct *task, const long code);
61671+int gr_acl_handle_procpidmem(const struct task_struct *task);
ae4e228f
MT
61672+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61673+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61674+void gr_audit_ptrace(struct task_struct *task);
16454cff 61675+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58c5fc13
MT
61676+
61677+#ifdef CONFIG_GRKERNSEC
6892158b 61678+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
ae4e228f 61679+void gr_handle_vm86(void);
71d190be 61680+void gr_handle_mem_readwrite(u64 from, u64 to);
58c5fc13
MT
61681+
61682+extern int grsec_enable_dmesg;
df50ba0c 61683+extern int grsec_disable_privio;
15a11c5b
MT
61684+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61685+extern int grsec_enable_chroot_findtask;
61686+#endif
58c5fc13
MT
61687+#endif
61688+
61689+#endif
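
grsecurity.h is the hook API that the rest of this patch calls from core kernel paths (VFS, exec, signals, chroot handling and so on). For the gr_acl_handle_*() checks, a return value of 0 conventionally means the RBAC policy denied the operation. A hypothetical call-site sketch built only on the declarations above; this is not the literal patched fs/ code and will not compile outside a kernel tree.

/* hypothetical sketch: gating an unlink through the RBAC hook above */
static int demo_vfs_unlink_check(struct dentry *dentry, struct vfsmount *mnt)
{
	__u32 reqmode = gr_acl_handle_unlink(dentry, mnt);

	if (!reqmode)
		return -EACCES;		/* policy denied (and logged) the unlink */

	return 0;			/* allowed, possibly with an audit entry */
}
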
fe2de317
MT
61690diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61691new file mode 100644
61692index 0000000..e7ffaaf
61693--- /dev/null
61694+++ b/include/linux/grsock.h
ae4e228f
MT
61695@@ -0,0 +1,19 @@
61696+#ifndef __GRSOCK_H
61697+#define __GRSOCK_H
61698+
61699+extern void gr_attach_curr_ip(const struct sock *sk);
61700+extern int gr_handle_sock_all(const int family, const int type,
61701+ const int protocol);
61702+extern int gr_handle_sock_server(const struct sockaddr *sck);
df50ba0c 61703+extern int gr_handle_sock_server_other(const struct sock *sck);
ae4e228f
MT
61704+extern int gr_handle_sock_client(const struct sockaddr *sck);
61705+extern int gr_search_connect(struct socket * sock,
61706+ struct sockaddr_in * addr);
61707+extern int gr_search_bind(struct socket * sock,
61708+ struct sockaddr_in * addr);
61709+extern int gr_search_listen(struct socket * sock);
61710+extern int gr_search_accept(struct socket * sock);
61711+extern int gr_search_socket(const int domain, const int type,
61712+ const int protocol);
61713+
61714+#endif
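
grsock.h declares the socket-layer hooks: gr_search_*() are the RBAC policy checks and gr_handle_sock_*() are the gid-based socket restrictions. The sketch below shows how socket creation is typically gated with them, assuming the usual grsecurity convention that gr_search_socket() returns nonzero to allow and gr_handle_sock_all() returns nonzero to deny; it is illustrative only, not the literal net/socket.c hunk.

/* hypothetical sketch of a socket(2)-style check built on the hooks above */
static int demo_socket_create_check(int family, int type, int protocol)
{
	if (!gr_search_socket(family, type, protocol))
		return -EACCES;		/* RBAC policy forbids this socket */

	if (gr_handle_sock_all(family, type, protocol))
		return -EACCES;		/* blocked by the "deny all sockets" gid */

	return 0;
}
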
fe2de317
MT
61715diff --git a/include/linux/hid.h b/include/linux/hid.h
61716index 9cf8e7a..5ec94d0 100644
61717--- a/include/linux/hid.h
61718+++ b/include/linux/hid.h
6e9df6a3 61719@@ -676,7 +676,7 @@ struct hid_ll_driver {
15a11c5b
MT
61720 unsigned int code, int value);
61721
61722 int (*parse)(struct hid_device *hdev);
61723-};
61724+} __no_const;
61725
61726 #define PM_HINT_FULLON 1<<5
61727 #define PM_HINT_NORMAL 1<<1
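
The __no_const annotation seen here, and on several ops structures below (i2c_algorithm, struct iommu_flush, struct memory_accessor, abx500_ops, net_device_ops, of_pdt_ops), works together with PaX's constification GCC plugin: structures consisting only of function pointers are normally forced read-only, and __no_const exempts a type whose instances genuinely have to be written at runtime. A user-space approximation of the idea; here __no_const is defined away, since the real attribute comes from the plugin.

#ifndef __no_const
#define __no_const	/* real tree: an attribute recognised by the constify plugin */
#endif

struct demo_ops {
	int (*probe)(void);
	void (*remove)(void);
} __no_const;		/* instances stay writable even with constification on */

static int demo_probe(void) { return 0; }

int main(void)
{
	struct demo_ops ops;

	ops.probe = demo_probe;	/* only legal because the type is not constified */
	ops.remove = 0;
	return ops.probe();
}
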
fe2de317
MT
61728diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61729index 3a93f73..b19d0b3 100644
61730--- a/include/linux/highmem.h
61731+++ b/include/linux/highmem.h
61732@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
58c5fc13
MT
61733 kunmap_atomic(kaddr, KM_USER0);
61734 }
61735
61736+static inline void sanitize_highpage(struct page *page)
61737+{
61738+ void *kaddr;
61739+ unsigned long flags;
61740+
61741+ local_irq_save(flags);
61742+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
61743+ clear_page(kaddr);
61744+ kunmap_atomic(kaddr, KM_CLEARPAGE);
61745+ local_irq_restore(flags);
61746+}
61747+
61748 static inline void zero_user_segments(struct page *page,
61749 unsigned start1, unsigned end1,
61750 unsigned start2, unsigned end2)
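
sanitize_highpage(), added above, is the per-page scrubber used by PaX's memory sanitization feature: it zeroes a page through a temporary atomic kmap with interrupts disabled, so freed pages do not leak their previous contents. Below is a hypothetical sketch of the call pattern on a page-free path; the real hunk lives in mm/page_alloc.c elsewhere in this patch and differs in detail.

/* hypothetical sketch: scrub every page of an order-N allocation at free time */
static void demo_free_pages_sanitize(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++)
		sanitize_highpage(page + i);	/* zero the page before it is reused */
}
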
fe2de317
MT
61751diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61752index a6c652e..1f5878f 100644
61753--- a/include/linux/i2c.h
61754+++ b/include/linux/i2c.h
15a11c5b
MT
61755@@ -346,6 +346,7 @@ struct i2c_algorithm {
61756 /* To determine what the adapter supports */
61757 u32 (*functionality) (struct i2c_adapter *);
61758 };
61759+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61760
61761 /*
61762 * i2c_adapter is the structure used to identify a physical i2c bus along
fe2de317
MT
61763diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61764index a6deef4..c56a7f2 100644
61765--- a/include/linux/i2o.h
61766+++ b/include/linux/i2o.h
8308f9c9
MT
61767@@ -564,7 +564,7 @@ struct i2o_controller {
61768 struct i2o_device *exec; /* Executive */
61769 #if BITS_PER_LONG == 64
61770 spinlock_t context_list_lock; /* lock for context_list */
61771- atomic_t context_list_counter; /* needed for unique contexts */
61772+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61773 struct list_head context_list; /* list of context id's
61774 and pointers */
61775 #endif
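
atomic_unchecked_t, substituted here and in many headers below (kgdb, mm, mmzone, padata, perf_event, oprofile), is the PaX REFCOUNT opt-out: with REFCOUNT enabled, ordinary atomic_t arithmetic traps on signed overflow to stop reference-count overflow exploits, so counters whose wrap-around is harmless, like this context-ID generator, keep plain wrapping semantics via the unchecked variant. A hedged sketch, assuming the atomic_*_unchecked() helpers that the arch-level parts of this patch introduce:

/* hypothetical sketch: an ID generator where wrap-around is intentional */
static atomic_unchecked_t demo_ctx_counter = ATOMIC_INIT(0);

static u32 demo_next_context_id(void)
{
	/* no overflow trap here: only short-term uniqueness matters */
	return (u32)atomic_inc_return_unchecked(&demo_ctx_counter);
}
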
fe2de317
MT
61776diff --git a/include/linux/init.h b/include/linux/init.h
61777index 9146f39..885354d 100644
61778--- a/include/linux/init.h
61779+++ b/include/linux/init.h
61780@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
6892158b
MT
61781
61782 /* Each module must use one module_init(). */
61783 #define module_init(initfn) \
61784- static inline initcall_t __inittest(void) \
61785+ static inline __used initcall_t __inittest(void) \
61786 { return initfn; } \
61787 int init_module(void) __attribute__((alias(#initfn)));
61788
61789 /* This is only required if you want to be unloadable. */
61790 #define module_exit(exitfn) \
61791- static inline exitcall_t __exittest(void) \
61792+ static inline __used exitcall_t __exittest(void) \
61793 { return exitfn; } \
61794 void cleanup_module(void) __attribute__((alias(#exitfn)));
61795
fe2de317
MT
61796diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61797index d14e058..4162929 100644
61798--- a/include/linux/init_task.h
61799+++ b/include/linux/init_task.h
15a11c5b
MT
61800@@ -126,6 +126,12 @@ extern struct cred init_cred;
61801 # define INIT_PERF_EVENTS(tsk)
71d190be
MT
61802 #endif
61803
61804+#ifdef CONFIG_X86
61805+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61806+#else
61807+#define INIT_TASK_THREAD_INFO
61808+#endif
61809+
61810 /*
15a11c5b
MT
61811 * INIT_TASK is used to set up the first task table, touch at
61812 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61813@@ -164,6 +170,7 @@ extern struct cred init_cred;
71d190be
MT
61814 RCU_INIT_POINTER(.cred, &init_cred), \
61815 .comm = "swapper", \
61816 .thread = INIT_THREAD, \
61817+ INIT_TASK_THREAD_INFO \
61818 .fs = &init_fs, \
61819 .files = &init_files, \
61820 .signal = &init_signals, \
fe2de317
MT
61821diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61822index 9310c69..6ebb244 100644
61823--- a/include/linux/intel-iommu.h
61824+++ b/include/linux/intel-iommu.h
15a11c5b
MT
61825@@ -296,7 +296,7 @@ struct iommu_flush {
61826 u8 fm, u64 type);
61827 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61828 unsigned int size_order, u64 type);
61829-};
61830+} __no_const;
61831
61832 enum {
61833 SR_DMAR_FECTL_REG,
fe2de317
MT
61834diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61835index f51a81b..adfcb44 100644
61836--- a/include/linux/interrupt.h
61837+++ b/include/linux/interrupt.h
6e9df6a3 61838@@ -425,7 +425,7 @@ enum
ae4e228f
MT
61839 /* map softirq index to softirq name. update 'softirq_to_name' in
61840 * kernel/softirq.c when adding a new softirq.
61841 */
61842-extern char *softirq_to_name[NR_SOFTIRQS];
61843+extern const char * const softirq_to_name[NR_SOFTIRQS];
61844
61845 /* softirq mask and active fields moved to irq_cpustat_t in
61846 * asm/hardirq.h to get better cache usage. KAO
fe2de317 61847@@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
ae4e228f
MT
61848
61849 struct softirq_action
61850 {
61851- void (*action)(struct softirq_action *);
61852+ void (*action)(void);
61853 };
61854
61855 asmlinkage void do_softirq(void);
61856 asmlinkage void __do_softirq(void);
61857-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61858+extern void open_softirq(int nr, void (*action)(void));
61859 extern void softirq_init(void);
bc901d79
MT
61860 static inline void __raise_softirq_irqoff(unsigned int nr)
61861 {
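
The softirq_action callback loses its struct softirq_action * parameter because no in-tree handler ever used it; with the narrower prototype, handlers register through open_softirq() with an exact type match. A sketch of what a handler looks like after the change; the softirq vector chosen here is only illustrative.

/* before the change: static void demo_softirq_action(struct softirq_action *unused) */
static void demo_softirq_action(void)
{
	/* drain whatever work this softirq is responsible for */
}

static void demo_softirq_setup(void)
{
	open_softirq(TASKLET_SOFTIRQ, demo_softirq_action);	/* illustrative vector */
}
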
fe2de317
MT
61862diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61863index 0df513b..fe901a2 100644
61864--- a/include/linux/kallsyms.h
61865+++ b/include/linux/kallsyms.h
58c5fc13
MT
61866@@ -15,7 +15,8 @@
61867
61868 struct module;
61869
61870-#ifdef CONFIG_KALLSYMS
bc901d79 61871+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
58c5fc13
MT
61872+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61873 /* Lookup the address for a symbol. Returns 0 if not found. */
61874 unsigned long kallsyms_lookup_name(const char *name);
61875
fe2de317 61876@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
58c5fc13
MT
61877 /* Stupid that this does nothing, but I didn't create this mess. */
61878 #define __print_symbol(fmt, addr)
61879 #endif /*CONFIG_KALLSYMS*/
bc901d79
MT
61880+#else /* when included by kallsyms.c, vsnprintf.c, or
61881+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
58c5fc13 61882+extern void __print_symbol(const char *fmt, unsigned long address);
66a7e928 61883+extern int sprint_backtrace(char *buffer, unsigned long address);
bc901d79
MT
61884+extern int sprint_symbol(char *buffer, unsigned long address);
61885+const char *kallsyms_lookup(unsigned long addr,
61886+ unsigned long *symbolsize,
61887+ unsigned long *offset,
61888+ char **modname, char *namebuf);
58c5fc13
MT
61889+#endif
61890
61891 /* This macro allows us to keep printk typechecking */
61892 static void __check_printsym_format(const char *fmt, ...)
fe2de317
MT
61893diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61894index fa39183..40160be 100644
61895--- a/include/linux/kgdb.h
61896+++ b/include/linux/kgdb.h
8308f9c9
MT
61897@@ -53,7 +53,7 @@ extern int kgdb_connected;
61898 extern int kgdb_io_module_registered;
61899
61900 extern atomic_t kgdb_setting_breakpoint;
61901-extern atomic_t kgdb_cpu_doing_single_step;
61902+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61903
61904 extern struct task_struct *kgdb_usethread;
61905 extern struct task_struct *kgdb_contthread;
15a11c5b
MT
61906@@ -251,7 +251,7 @@ struct kgdb_arch {
61907 void (*disable_hw_break)(struct pt_regs *regs);
61908 void (*remove_all_hw_break)(void);
61909 void (*correct_hw_break)(void);
61910-};
61911+} __do_const;
ae4e228f 61912
15a11c5b
MT
61913 /**
61914 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61915@@ -276,7 +276,7 @@ struct kgdb_io {
61916 void (*pre_exception) (void);
61917 void (*post_exception) (void);
61918 int is_console;
61919-};
61920+} __do_const;
ae4e228f 61921
15a11c5b 61922 extern struct kgdb_arch arch_kgdb_ops;
ae4e228f 61923
fe2de317
MT
61924diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61925index 0da38cf..d23f05f 100644
61926--- a/include/linux/kmod.h
61927+++ b/include/linux/kmod.h
61928@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
71d190be
MT
61929 * usually useless though. */
61930 extern int __request_module(bool wait, const char *name, ...) \
61931 __attribute__((format(printf, 2, 3)));
61932+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
61933+ __attribute__((format(printf, 3, 4)));
61934 #define request_module(mod...) __request_module(true, mod)
61935 #define request_module_nowait(mod...) __request_module(false, mod)
61936 #define try_then_request_module(x, mod...) \
fe2de317
MT
61937diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61938index eabb21a..3f030f4 100644
61939--- a/include/linux/kvm_host.h
61940+++ b/include/linux/kvm_host.h
61941@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
58c5fc13
MT
61942 void vcpu_load(struct kvm_vcpu *vcpu);
61943 void vcpu_put(struct kvm_vcpu *vcpu);
61944
57199397
MT
61945-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61946+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
58c5fc13
MT
61947 struct module *module);
61948 void kvm_exit(void);
61949
fe2de317 61950@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
58c5fc13
MT
61951 struct kvm_guest_debug *dbg);
61952 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61953
61954-int kvm_arch_init(void *opaque);
61955+int kvm_arch_init(const void *opaque);
61956 void kvm_arch_exit(void);
61957
61958 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
fe2de317
MT
61959diff --git a/include/linux/libata.h b/include/linux/libata.h
61960index efd6f98..5f5fd37 100644
61961--- a/include/linux/libata.h
61962+++ b/include/linux/libata.h
6e9df6a3 61963@@ -909,7 +909,7 @@ struct ata_port_operations {
15a11c5b
MT
61964 * fields must be pointers.
61965 */
61966 const struct ata_port_operations *inherits;
61967-};
61968+} __do_const;
66a7e928 61969
15a11c5b 61970 struct ata_port_info {
ae4e228f 61971 unsigned long flags;
fe2de317
MT
61972diff --git a/include/linux/mca.h b/include/linux/mca.h
61973index 3797270..7765ede 100644
61974--- a/include/linux/mca.h
61975+++ b/include/linux/mca.h
15a11c5b
MT
61976@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61977 int region);
61978 void * (*mca_transform_memory)(struct mca_device *,
61979 void *memory);
61980-};
61981+} __no_const;
61982
61983 struct mca_bus {
61984 u64 default_dma_mask;
fe2de317
MT
61985diff --git a/include/linux/memory.h b/include/linux/memory.h
61986index 935699b..11042cc 100644
61987--- a/include/linux/memory.h
61988+++ b/include/linux/memory.h
15a11c5b
MT
61989@@ -144,7 +144,7 @@ struct memory_accessor {
61990 size_t count);
61991 ssize_t (*write)(struct memory_accessor *, const char *buf,
61992 off_t offset, size_t count);
61993-};
61994+} __no_const;
ae4e228f
MT
61995
61996 /*
15a11c5b 61997 * Kernel text modification mutex, used for code patching. Users of this lock
fe2de317
MT
61998diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61999index 896b5e4..1159ad0 100644
62000--- a/include/linux/mfd/abx500.h
62001+++ b/include/linux/mfd/abx500.h
15a11c5b
MT
62002@@ -234,6 +234,7 @@ struct abx500_ops {
62003 int (*event_registers_startup_state_get) (struct device *, u8 *);
66a7e928
MT
62004 int (*startup_irq_enabled) (struct device *, unsigned int);
62005 };
15a11c5b 62006+typedef struct abx500_ops __no_const abx500_ops_no_const;
66a7e928 62007
15a11c5b 62008 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
66a7e928 62009 void abx500_remove_ops(struct device *dev);
fe2de317
MT
62010diff --git a/include/linux/mm.h b/include/linux/mm.h
62011index fedc5f0..7cedb6d 100644
62012--- a/include/linux/mm.h
62013+++ b/include/linux/mm.h
62014@@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void *objp);
58c5fc13 62015
df50ba0c
MT
62016 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62017 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62018+
62019+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62020+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62021+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62022+#else
62023 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
58c5fc13
MT
62024+#endif
62025+
df50ba0c
MT
62026 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62027 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62028
6e9df6a3 62029@@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
bc901d79
MT
62030 int set_page_dirty_lock(struct page *page);
62031 int clear_page_dirty_for_io(struct page *page);
62032
62033-/* Is the vma a continuation of the stack vma above it? */
66a7e928 62034-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
bc901d79
MT
62035-{
62036- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62037-}
66a7e928
MT
62038-
62039-static inline int stack_guard_page_start(struct vm_area_struct *vma,
62040- unsigned long addr)
62041-{
62042- return (vma->vm_flags & VM_GROWSDOWN) &&
62043- (vma->vm_start == addr) &&
62044- !vma_growsdown(vma->vm_prev, addr);
62045-}
62046-
62047-/* Is the vma a continuation of the stack vma below it? */
62048-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62049-{
62050- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62051-}
62052-
62053-static inline int stack_guard_page_end(struct vm_area_struct *vma,
62054- unsigned long addr)
62055-{
62056- return (vma->vm_flags & VM_GROWSUP) &&
62057- (vma->vm_end == addr) &&
62058- !vma_growsup(vma->vm_next, addr);
62059-}
bc901d79
MT
62060-
62061 extern unsigned long move_page_tables(struct vm_area_struct *vma,
62062 unsigned long old_addr, struct vm_area_struct *new_vma,
62063 unsigned long new_addr, unsigned long len);
fe2de317 62064@@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
6e9df6a3
MT
62065 }
62066 #endif
58c5fc13 62067
6892158b 62068+#ifdef CONFIG_MMU
15a11c5b 62069+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
6892158b 62070+#else
15a11c5b 62071+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
6892158b
MT
62072+{
62073+ return __pgprot(0);
62074+}
62075+#endif
58c5fc13
MT
62076+
62077 int vma_wants_writenotify(struct vm_area_struct *vma);
62078
bc901d79 62079 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
6e9df6a3 62080@@ -1417,6 +1405,7 @@ out:
58c5fc13
MT
62081 }
62082
62083 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62084+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62085
62086 extern unsigned long do_brk(unsigned long, unsigned long);
62087
fe2de317 62088@@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
58c5fc13
MT
62089 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62090 struct vm_area_struct **pprev);
62091
62092+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
df50ba0c 62093+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
58c5fc13
MT
62094+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62095+
62096 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62097 NULL if none. Assume start_addr < end_addr. */
62098 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
fe2de317 62099@@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
58c5fc13
MT
62100 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
62101 }
62102
6892158b 62103-#ifdef CONFIG_MMU
58c5fc13 62104-pgprot_t vm_get_page_prot(unsigned long vm_flags);
6892158b
MT
62105-#else
62106-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62107-{
62108- return __pgprot(0);
62109-}
62110-#endif
62111-
58c5fc13
MT
62112 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62113 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62114 unsigned long pfn, unsigned long size, pgprot_t);
fe2de317 62115@@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long pfn);
ae4e228f
MT
62116 extern int sysctl_memory_failure_early_kill;
62117 extern int sysctl_memory_failure_recovery;
62118 extern void shake_page(struct page *p, int access);
62119-extern atomic_long_t mce_bad_pages;
62120+extern atomic_long_unchecked_t mce_bad_pages;
62121 extern int soft_offline_page(struct page *page, int flags);
66a7e928
MT
62122
62123 extern void dump_page(struct page *page);
fe2de317 62124@@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
16454cff
MT
62125 unsigned int pages_per_huge_page);
62126 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
df50ba0c 62127
58c5fc13
MT
62128+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62129+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62130+#else
62131+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62132+#endif
62133+
62134 #endif /* __KERNEL__ */
62135 #endif /* _LINUX_MM_H */
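
track_exec_limit(), declared above with an empty stub when CONFIG_ARCH_TRACK_EXEC_LIMIT is off, is how PaX's segment-based NX emulation on x86-32 (PAGEEXEC/SEGMEXEC) learns where the highest executable mapping ends; the stub pattern lets generic mm code call it unconditionally. A hypothetical caller sketch (the real calls are added to mm/mmap.c and mm/mprotect.c elsewhere in this patch):

/* hypothetical sketch: report a protection change so the exec limit can move */
static void demo_after_mprotect(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long prot)
{
	/* compiles to a no-op on architectures without exec-limit tracking */
	track_exec_limit(mm, start, end, prot);
}
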
fe2de317
MT
62136diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62137index 10a2f62..c8fa287 100644
62138--- a/include/linux/mm_types.h
62139+++ b/include/linux/mm_types.h
6e9df6a3 62140@@ -230,6 +230,8 @@ struct vm_area_struct {
58c5fc13
MT
62141 #ifdef CONFIG_NUMA
62142 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62143 #endif
62144+
62145+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62146 };
62147
62148 struct core_thread {
6e9df6a3 62149@@ -362,6 +364,24 @@ struct mm_struct {
15a11c5b
MT
62150 #ifdef CONFIG_CPUMASK_OFFSTACK
62151 struct cpumask cpumask_allocation;
58c5fc13
MT
62152 #endif
62153+
62154+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62155+ unsigned long pax_flags;
62156+#endif
62157+
62158+#ifdef CONFIG_PAX_DLRESOLVE
62159+ unsigned long call_dl_resolve;
62160+#endif
62161+
62162+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62163+ unsigned long call_syscall;
62164+#endif
62165+
62166+#ifdef CONFIG_PAX_ASLR
62167+ unsigned long delta_mmap; /* randomized offset */
62168+ unsigned long delta_stack; /* randomized offset */
62169+#endif
62170+
62171 };
62172
15a11c5b 62173 static inline void mm_init_cpumask(struct mm_struct *mm)
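
The per-mm fields added above carry PaX state across an exec: pax_flags holds the task's PaX marking, and delta_mmap/delta_stack are the ASLR offsets chosen at exec time. A hedged sketch of how an mmap-base calculation might fold the offset in, assuming the MF_PAX_RANDMMAP flag bit defined elsewhere in this patch; the direction and placement are illustrative, the real logic sits in the arch mmap layout code.

/* hypothetical sketch: apply the per-exec randomisation to a top-down mmap base */
static unsigned long demo_mmap_base(struct mm_struct *mm, unsigned long base)
{
#ifdef CONFIG_PAX_ASLR
	if (mm->pax_flags & MF_PAX_RANDMMAP)
		base -= mm->delta_mmap;		/* shift the base by the random delta */
#endif
	return base;
}
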
fe2de317
MT
62174diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62175index 1d1b1e1..2a13c78 100644
62176--- a/include/linux/mmu_notifier.h
62177+++ b/include/linux/mmu_notifier.h
62178@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
ae4e228f
MT
62179 */
62180 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62181 ({ \
62182- pte_t __pte; \
62183+ pte_t ___pte; \
62184 struct vm_area_struct *___vma = __vma; \
62185 unsigned long ___address = __address; \
62186- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62187+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62188 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62189- __pte; \
62190+ ___pte; \
62191 })
62192
16454cff 62193 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
fe2de317
MT
62194diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62195index be1ac8d..26868ce 100644
62196--- a/include/linux/mmzone.h
62197+++ b/include/linux/mmzone.h
6e9df6a3 62198@@ -356,7 +356,7 @@ struct zone {
57199397
MT
62199 unsigned long flags; /* zone flags, see below */
62200
62201 /* Zone statistics */
62202- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62203+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62204
62205 /*
6892158b 62206 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
fe2de317
MT
62207diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62208index ae28e93..1ac2233 100644
62209--- a/include/linux/mod_devicetable.h
62210+++ b/include/linux/mod_devicetable.h
58c5fc13
MT
62211@@ -12,7 +12,7 @@
62212 typedef unsigned long kernel_ulong_t;
62213 #endif
62214
62215-#define PCI_ANY_ID (~0)
62216+#define PCI_ANY_ID ((__u16)~0)
62217
62218 struct pci_device_id {
62219 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62220@@ -131,7 +131,7 @@ struct usb_device_id {
62221 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62222 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62223
62224-#define HID_ANY_ID (~0)
62225+#define HID_ANY_ID (~0U)
62226
62227 struct hid_device_id {
62228 __u16 bus;
fe2de317
MT
62229diff --git a/include/linux/module.h b/include/linux/module.h
62230index 1c30087..fc2a442 100644
62231--- a/include/linux/module.h
62232+++ b/include/linux/module.h
15a11c5b
MT
62233@@ -16,6 +16,7 @@
62234 #include <linux/kobject.h>
62235 #include <linux/moduleparam.h>
62236 #include <linux/tracepoint.h>
62237+#include <linux/fs.h>
62238
62239 #include <linux/percpu.h>
62240 #include <asm/module.h>
6e9df6a3 62241@@ -327,19 +328,16 @@ struct module
58c5fc13
MT
62242 int (*init)(void);
62243
62244 /* If this is non-NULL, vfree after init() returns */
62245- void *module_init;
62246+ void *module_init_rx, *module_init_rw;
62247
62248 /* Here is the actual code + data, vfree'd on unload. */
62249- void *module_core;
62250+ void *module_core_rx, *module_core_rw;
62251
62252 /* Here are the sizes of the init and core sections */
62253- unsigned int init_size, core_size;
62254+ unsigned int init_size_rw, core_size_rw;
62255
62256 /* The size of the executable code in each section. */
62257- unsigned int init_text_size, core_text_size;
16454cff
MT
62258-
62259- /* Size of RO sections of the module (text+rodata) */
62260- unsigned int init_ro_size, core_ro_size;
58c5fc13
MT
62261+ unsigned int init_size_rx, core_size_rx;
62262
62263 /* Arch-specific module values */
62264 struct mod_arch_specific arch;
6e9df6a3 62265@@ -395,6 +393,10 @@ struct module
15a11c5b
MT
62266 #ifdef CONFIG_EVENT_TRACING
62267 struct ftrace_event_call **trace_events;
62268 unsigned int num_trace_events;
62269+ struct file_operations trace_id;
62270+ struct file_operations trace_enable;
62271+ struct file_operations trace_format;
62272+ struct file_operations trace_filter;
62273 #endif
62274 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62275 unsigned int num_ftrace_callsites;
fe2de317 62276@@ -445,16 +447,46 @@ bool is_module_address(unsigned long addr);
df50ba0c 62277 bool is_module_percpu_address(unsigned long addr);
58c5fc13
MT
62278 bool is_module_text_address(unsigned long addr);
62279
62280+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62281+{
62282+
62283+#ifdef CONFIG_PAX_KERNEXEC
62284+ if (ktla_ktva(addr) >= (unsigned long)start &&
62285+ ktla_ktva(addr) < (unsigned long)start + size)
62286+ return 1;
62287+#endif
62288+
62289+ return ((void *)addr >= start && (void *)addr < start + size);
62290+}
62291+
62292+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62293+{
62294+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62295+}
62296+
62297+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62298+{
62299+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62300+}
62301+
62302+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62303+{
62304+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62305+}
62306+
62307+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62308+{
62309+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62310+}
62311+
62312 static inline int within_module_core(unsigned long addr, struct module *mod)
62313 {
62314- return (unsigned long)mod->module_core <= addr &&
62315- addr < (unsigned long)mod->module_core + mod->core_size;
62316+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62317 }
62318
62319 static inline int within_module_init(unsigned long addr, struct module *mod)
62320 {
62321- return (unsigned long)mod->module_init <= addr &&
62322- addr < (unsigned long)mod->module_init + mod->init_size;
62323+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62324 }
62325
62326 /* Search for module by name: must hold module_mutex. */
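
With KERNEXEC the single module_core/module_init regions are split into rx (code, mapped read-execute) and rw (data) halves, and within_module_core()/within_module_init() become wrappers over the per-region helpers defined above. A small example built directly on those helpers, for instance to classify an address against a module's executable image only:

/* does addr fall inside this module's executable (rx) mappings? */
static int demo_addr_in_module_text(unsigned long addr, struct module *mod)
{
	return within_module_init_rx(addr, mod) ||
	       within_module_core_rx(addr, mod);
}
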
fe2de317
MT
62327diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62328index b2be02e..6a9fdb1 100644
62329--- a/include/linux/moduleloader.h
62330+++ b/include/linux/moduleloader.h
62331@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
58c5fc13
MT
62332 sections. Returns NULL on failure. */
62333 void *module_alloc(unsigned long size);
62334
62335+#ifdef CONFIG_PAX_KERNEXEC
62336+void *module_alloc_exec(unsigned long size);
62337+#else
62338+#define module_alloc_exec(x) module_alloc(x)
62339+#endif
62340+
62341 /* Free memory returned from module_alloc. */
62342 void module_free(struct module *mod, void *module_region);
62343
62344+#ifdef CONFIG_PAX_KERNEXEC
62345+void module_free_exec(struct module *mod, void *module_region);
62346+#else
ae4e228f 62347+#define module_free_exec(x, y) module_free((x), (y))
58c5fc13
MT
62348+#endif
62349+
62350 /* Apply the given relocation to the (simplified) ELF. Return -error
62351 or 0. */
62352 int apply_relocate(Elf_Shdr *sechdrs,
fe2de317
MT
62353diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62354index ddaae98..3c70938 100644
62355--- a/include/linux/moduleparam.h
62356+++ b/include/linux/moduleparam.h
62357@@ -255,7 +255,7 @@ static inline void __kernel_param_unlock(void)
6892158b
MT
62358 * @len is usually just sizeof(string).
62359 */
62360 #define module_param_string(name, string, len, perm) \
62361- static const struct kparam_string __param_string_##name \
62362+ static const struct kparam_string __param_string_##name __used \
62363 = { len, string }; \
62364 __module_param_call(MODULE_PARAM_PREFIX, name, \
62365 &param_ops_string, \
fe2de317 62366@@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
6892158b
MT
62367 * module_param_named() for why this might be necessary.
62368 */
62369 #define module_param_array_named(name, array, type, nump, perm) \
62370- static const struct kparam_array __param_arr_##name \
62371+ static const struct kparam_array __param_arr_##name __used \
15a11c5b
MT
62372 = { .max = ARRAY_SIZE(array), .num = nump, \
62373 .ops = &param_ops_##type, \
62374 .elemsize = sizeof(array[0]), .elem = array }; \
fe2de317
MT
62375diff --git a/include/linux/namei.h b/include/linux/namei.h
62376index ffc0213..2c1f2cb 100644
62377--- a/include/linux/namei.h
62378+++ b/include/linux/namei.h
66a7e928 62379@@ -24,7 +24,7 @@ struct nameidata {
16454cff 62380 unsigned seq;
58c5fc13
MT
62381 int last_type;
62382 unsigned depth;
62383- char *saved_names[MAX_NESTED_LINKS + 1];
62384+ const char *saved_names[MAX_NESTED_LINKS + 1];
62385
62386 /* Intent data */
62387 union {
6e9df6a3 62388@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
58c5fc13
MT
62389 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62390 extern void unlock_rename(struct dentry *, struct dentry *);
62391
62392-static inline void nd_set_link(struct nameidata *nd, char *path)
62393+static inline void nd_set_link(struct nameidata *nd, const char *path)
62394 {
62395 nd->saved_names[nd->depth] = path;
62396 }
62397
62398-static inline char *nd_get_link(struct nameidata *nd)
ae4e228f 62399+static inline const char *nd_get_link(const struct nameidata *nd)
58c5fc13
MT
62400 {
62401 return nd->saved_names[nd->depth];
62402 }
fe2de317
MT
62403diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62404index ddee79b..67af106 100644
62405--- a/include/linux/netdevice.h
62406+++ b/include/linux/netdevice.h
6e9df6a3 62407@@ -944,6 +944,7 @@ struct net_device_ops {
15a11c5b
MT
62408 int (*ndo_set_features)(struct net_device *dev,
62409 u32 features);
62410 };
62411+typedef struct net_device_ops __no_const net_device_ops_no_const;
62412
62413 /*
62414 * The DEVICE structure.
fe2de317
MT
62415diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62416new file mode 100644
62417index 0000000..33f4af8
62418--- /dev/null
62419+++ b/include/linux/netfilter/xt_gradm.h
6892158b
MT
62420@@ -0,0 +1,9 @@
62421+#ifndef _LINUX_NETFILTER_XT_GRADM_H
62422+#define _LINUX_NETFILTER_XT_GRADM_H 1
62423+
62424+struct xt_gradm_mtinfo {
62425+ __u16 flags;
62426+ __u16 invflags;
62427+};
62428+
62429+#endif
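
xt_gradm.h is the userspace-visible match info for the new xt_gradm netfilter match, which lets iptables rules key on whether the grsecurity RBAC system is currently enabled. Below is a hypothetical sketch of a match routine consuming it, using gr_acl_is_enabled() from grsecurity.h above; the real net/netfilter/xt_gradm.c added by this patch may differ in detail.

/* hypothetical sketch: match when RBAC is enabled, inverted via invflags */
static bool demo_gradm_mt(const struct sk_buff *skb,
			  struct xt_action_param *par)
{
	const struct xt_gradm_mtinfo *info = par->matchinfo;
	bool rbac_on = gr_acl_is_enabled() != 0;

	return rbac_on ^ (info->invflags != 0);
}
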
fe2de317
MT
62430diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62431index c65a18a..0c05f3a 100644
62432--- a/include/linux/of_pdt.h
62433+++ b/include/linux/of_pdt.h
15a11c5b
MT
62434@@ -32,7 +32,7 @@ struct of_pdt_ops {
62435
62436 /* return 0 on success; fill in 'len' with number of bytes in path */
62437 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62438-};
62439+} __no_const;
62440
62441 extern void *prom_early_alloc(unsigned long size);
62442
fe2de317
MT
62443diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62444index 49c8727..34d2ae1 100644
62445--- a/include/linux/oprofile.h
62446+++ b/include/linux/oprofile.h
62447@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
ae4e228f
MT
62448 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62449 char const * name, ulong * val);
58c5fc13 62450
ae4e228f
MT
62451-/** Create a file for read-only access to an atomic_t. */
62452+/** Create a file for read-only access to an atomic_unchecked_t. */
58c5fc13
MT
62453 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62454- char const * name, atomic_t * val);
62455+ char const * name, atomic_unchecked_t * val);
62456
62457 /** create a directory */
62458 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
fe2de317
MT
62459diff --git a/include/linux/padata.h b/include/linux/padata.h
62460index 4633b2f..988bc08 100644
62461--- a/include/linux/padata.h
62462+++ b/include/linux/padata.h
8308f9c9
MT
62463@@ -129,7 +129,7 @@ struct parallel_data {
62464 struct padata_instance *pinst;
62465 struct padata_parallel_queue __percpu *pqueue;
62466 struct padata_serial_queue __percpu *squeue;
62467- atomic_t seq_nr;
62468+ atomic_unchecked_t seq_nr;
62469 atomic_t reorder_objects;
62470 atomic_t refcnt;
62471 unsigned int max_seq_nr;
fe2de317
MT
62472diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62473index c816075..cd28c4d 100644
62474--- a/include/linux/perf_event.h
62475+++ b/include/linux/perf_event.h
6e9df6a3 62476@@ -745,8 +745,8 @@ struct perf_event {
8308f9c9
MT
62477
62478 enum perf_event_active_state state;
62479 unsigned int attach_state;
62480- local64_t count;
62481- atomic64_t child_count;
62482+ local64_t count; /* PaX: fix it one day */
62483+ atomic64_unchecked_t child_count;
62484
62485 /*
62486 * These are the total time in nanoseconds that the event
6e9df6a3 62487@@ -797,8 +797,8 @@ struct perf_event {
8308f9c9
MT
62488 * These accumulate total time (in nanoseconds) that children
62489 * events have been enabled and running, respectively.
62490 */
62491- atomic64_t child_total_time_enabled;
62492- atomic64_t child_total_time_running;
62493+ atomic64_unchecked_t child_total_time_enabled;
62494+ atomic64_unchecked_t child_total_time_running;
62495
62496 /*
62497 * Protect attach/detach and child_list:
fe2de317
MT
62498diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62499index 77257c9..51d473a 100644
62500--- a/include/linux/pipe_fs_i.h
62501+++ b/include/linux/pipe_fs_i.h
16454cff 62502@@ -46,9 +46,9 @@ struct pipe_buffer {
57199397 62503 struct pipe_inode_info {
ae4e228f 62504 wait_queue_head_t wait;
57199397 62505 unsigned int nrbufs, curbuf, buffers;
ae4e228f
MT
62506- unsigned int readers;
62507- unsigned int writers;
62508- unsigned int waiting_writers;
62509+ atomic_t readers;
62510+ atomic_t writers;
62511+ atomic_t waiting_writers;
62512 unsigned int r_counter;
62513 unsigned int w_counter;
57199397 62514 struct page *tmp_page;
fe2de317
MT
62515diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62516index daac05d..c6802ce 100644
62517--- a/include/linux/pm_runtime.h
62518+++ b/include/linux/pm_runtime.h
62519@@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
bc901d79
MT
62520
62521 static inline void pm_runtime_mark_last_busy(struct device *dev)
62522 {
62523- ACCESS_ONCE(dev->power.last_busy) = jiffies;
62524+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62525 }
62526
62527 #else /* !CONFIG_PM_RUNTIME */
fe2de317
MT
62528diff --git a/include/linux/poison.h b/include/linux/poison.h
62529index 79159de..f1233a9 100644
62530--- a/include/linux/poison.h
62531+++ b/include/linux/poison.h
ae4e228f 62532@@ -19,8 +19,8 @@
58c5fc13
MT
62533 * under normal circumstances, used to verify that nobody uses
62534 * non-initialized list entries.
62535 */
ae4e228f
MT
62536-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62537-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62538+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62539+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
58c5fc13
MT
62540
62541 /********** include/linux/timer.h **********/
62542 /*
fe2de317
MT
62543diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62544index 58969b2..ead129b 100644
62545--- a/include/linux/preempt.h
62546+++ b/include/linux/preempt.h
6e9df6a3 62547@@ -123,7 +123,7 @@ struct preempt_ops {
15a11c5b
MT
62548 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62549 void (*sched_out)(struct preempt_notifier *notifier,
62550 struct task_struct *next);
62551-};
62552+} __no_const;
66a7e928 62553
15a11c5b
MT
62554 /**
62555 * preempt_notifier - key for installing preemption notifiers
fe2de317
MT
62556diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62557index 643b96c..ef55a9c 100644
62558--- a/include/linux/proc_fs.h
62559+++ b/include/linux/proc_fs.h
62560@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
58c5fc13
MT
62561 return proc_create_data(name, mode, parent, proc_fops, NULL);
62562 }
62563
62564+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
62565+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62566+{
62567+#ifdef CONFIG_GRKERNSEC_PROC_USER
62568+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62569+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62570+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62571+#else
62572+ return proc_create_data(name, mode, parent, proc_fops, NULL);
62573+#endif
62574+}
62575+
62576+
62577 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62578 mode_t mode, struct proc_dir_entry *base,
62579 read_proc_t *read_proc, void * data)
15a11c5b
MT
62580@@ -258,7 +271,7 @@ union proc_op {
62581 int (*proc_show)(struct seq_file *m,
62582 struct pid_namespace *ns, struct pid *pid,
62583 struct task_struct *task);
62584-};
62585+} __no_const;
62586
62587 struct ctl_table_header;
62588 struct ctl_table;
fe2de317
MT
62589diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
62590index 800f113..af90cc8 100644
62591--- a/include/linux/ptrace.h
62592+++ b/include/linux/ptrace.h
62593@@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_struct *child);
16454cff
MT
62594 extern void exit_ptrace(struct task_struct *tracer);
62595 #define PTRACE_MODE_READ 1
62596 #define PTRACE_MODE_ATTACH 2
62597-/* Returns 0 on success, -errno on denial. */
62598-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
62599 /* Returns true on success, false on denial. */
62600 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
62601+/* Returns true on success, false on denial. */
62602+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
62603
62604 static inline int ptrace_reparented(struct task_struct *child)
62605 {
fe2de317
MT
62606diff --git a/include/linux/random.h b/include/linux/random.h
62607index d13059f..2eaafaa 100644
62608--- a/include/linux/random.h
62609+++ b/include/linux/random.h
15a11c5b 62610@@ -69,12 +69,17 @@ void srandom32(u32 seed);
57199397
MT
62611
62612 u32 prandom32(struct rnd_state *);
58c5fc13
MT
62613
62614+static inline unsigned long pax_get_random_long(void)
62615+{
62616+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62617+}
62618+
57199397
MT
62619 /*
62620 * Handle minimum values for seeds
62621 */
62622 static inline u32 __seed(u32 x, u32 m)
62623 {
62624- return (x < m) ? x + m : x;
62625+ return (x <= m) ? x + m + 1 : x;
62626 }
58c5fc13 62627
57199397 62628 /**
fe2de317
MT
62629diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62630index e0879a7..a12f962 100644
62631--- a/include/linux/reboot.h
62632+++ b/include/linux/reboot.h
62633@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
66a7e928
MT
62634 * Architecture-specific implementations of sys_reboot commands.
62635 */
62636
62637-extern void machine_restart(char *cmd);
62638-extern void machine_halt(void);
62639-extern void machine_power_off(void);
62640+extern void machine_restart(char *cmd) __noreturn;
62641+extern void machine_halt(void) __noreturn;
62642+extern void machine_power_off(void) __noreturn;
62643
62644 extern void machine_shutdown(void);
62645 struct pt_regs;
fe2de317 62646@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
66a7e928
MT
62647 */
62648
62649 extern void kernel_restart_prepare(char *cmd);
62650-extern void kernel_restart(char *cmd);
62651-extern void kernel_halt(void);
62652-extern void kernel_power_off(void);
62653+extern void kernel_restart(char *cmd) __noreturn;
62654+extern void kernel_halt(void) __noreturn;
62655+extern void kernel_power_off(void) __noreturn;
62656
62657 extern int C_A_D; /* for sysctl */
62658 void ctrl_alt_del(void);
6e9df6a3 62659@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
66a7e928
MT
62660 * Emergency restart, callable from an interrupt handler.
62661 */
62662
62663-extern void emergency_restart(void);
62664+extern void emergency_restart(void) __noreturn;
62665 #include <asm/emergency-restart.h>
62666
62667 #endif
fe2de317
MT
62668diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
62669index 96d465f..b084e05 100644
62670--- a/include/linux/reiserfs_fs.h
62671+++ b/include/linux/reiserfs_fs.h
62672@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
58c5fc13
MT
62673 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
62674
62675 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
62676-#define get_generation(s) atomic_read (&fs_generation(s))
62677+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
62678 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
62679 #define __fs_changed(gen,s) (gen != get_generation (s))
ae4e228f 62680 #define fs_changed(gen,s) \
fe2de317
MT
62681diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
62682index 52c83b6..18ed7eb 100644
62683--- a/include/linux/reiserfs_fs_sb.h
62684+++ b/include/linux/reiserfs_fs_sb.h
ae4e228f 62685@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
58c5fc13
MT
62686 /* Comment? -Hans */
62687 wait_queue_head_t s_wait;
62688 /* To be obsoleted soon by per buffer seals.. -Hans */
62689- atomic_t s_generation_counter; // increased by one every time the
62690+ atomic_unchecked_t s_generation_counter; // increased by one every time the
62691 // tree gets re-balanced
62692 unsigned long s_properties; /* File system properties. Currently holds
62693 on-disk FS format */
fe2de317
MT
62694diff --git a/include/linux/relay.h b/include/linux/relay.h
62695index 14a86bc..17d0700 100644
62696--- a/include/linux/relay.h
62697+++ b/include/linux/relay.h
15a11c5b
MT
62698@@ -159,7 +159,7 @@ struct rchan_callbacks
62699 * The callback should return 0 if successful, negative if not.
62700 */
62701 int (*remove_buf_file)(struct dentry *dentry);
62702-};
62703+} __no_const;
62704
62705 /*
62706 * CONFIG_RELAY kernel API, kernel/relay.c
fe2de317
MT
62707diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62708index c6c6084..5bf1212 100644
62709--- a/include/linux/rfkill.h
62710+++ b/include/linux/rfkill.h
15a11c5b
MT
62711@@ -147,6 +147,7 @@ struct rfkill_ops {
62712 void (*query)(struct rfkill *rfkill, void *data);
62713 int (*set_block)(void *data, bool blocked);
62714 };
62715+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62716
62717 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62718 /**
fe2de317
MT
62719diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62720index 2148b12..519b820 100644
62721--- a/include/linux/rmap.h
62722+++ b/include/linux/rmap.h
62723@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
57199397
MT
62724 void anon_vma_init(void); /* create anon_vma_cachep */
62725 int anon_vma_prepare(struct vm_area_struct *);
62726 void unlink_anon_vmas(struct vm_area_struct *);
62727-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62728-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62729+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62730+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62731 void __anon_vma_link(struct vm_area_struct *);
57199397 62732
66a7e928 62733 static inline void anon_vma_merge(struct vm_area_struct *vma,
fe2de317
MT
62734diff --git a/include/linux/sched.h b/include/linux/sched.h
62735index 41d0237..5a64056 100644
62736--- a/include/linux/sched.h
62737+++ b/include/linux/sched.h
66a7e928 62738@@ -100,6 +100,7 @@ struct bio_list;
58c5fc13 62739 struct fs_struct;
ae4e228f 62740 struct perf_event_context;
66a7e928 62741 struct blk_plug;
58c5fc13
MT
62742+struct linux_binprm;
62743
62744 /*
62745 * List of flags we want to share for kernel threads,
15a11c5b 62746@@ -380,10 +381,13 @@ struct user_namespace;
57199397
MT
62747 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62748
62749 extern int sysctl_max_map_count;
62750+extern unsigned long sysctl_heap_stack_gap;
62751
62752 #include <linux/aio.h>
62753
62754 #ifdef CONFIG_MMU
16454cff
MT
62755+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62756+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
57199397
MT
62757 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62758 extern unsigned long
62759 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
66a7e928 62760@@ -629,6 +633,17 @@ struct signal_struct {
16454cff
MT
62761 #ifdef CONFIG_TASKSTATS
62762 struct taskstats *stats;
58c5fc13 62763 #endif
16454cff 62764+
58c5fc13
MT
62765+#ifdef CONFIG_GRKERNSEC
62766+ u32 curr_ip;
bc901d79 62767+ u32 saved_ip;
58c5fc13
MT
62768+ u32 gr_saddr;
62769+ u32 gr_daddr;
62770+ u16 gr_sport;
62771+ u16 gr_dport;
62772+ u8 used_accept:1;
62773+#endif
ae4e228f 62774+
16454cff
MT
62775 #ifdef CONFIG_AUDIT
62776 unsigned audit_tty;
62777 struct tty_audit_buf *tty_audit_buf;
15a11c5b 62778@@ -710,6 +725,11 @@ struct user_struct {
71d190be
MT
62779 struct key *session_keyring; /* UID's default session keyring */
62780 #endif
62781
62782+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62783+ unsigned int banned;
62784+ unsigned long ban_expires;
62785+#endif
62786+
62787 /* Hash table maintenance information */
62788 struct hlist_node uidhash_node;
62789 uid_t uid;
15a11c5b 62790@@ -1340,8 +1360,8 @@ struct task_struct {
58c5fc13
MT
62791 struct list_head thread_group;
62792
62793 struct completion *vfork_done; /* for vfork() */
62794- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62795- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62796+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62797+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62798
62799 cputime_t utime, stime, utimescaled, stimescaled;
62800 cputime_t gtime;
15a11c5b 62801@@ -1357,13 +1377,6 @@ struct task_struct {
58c5fc13
MT
62802 struct task_cputime cputime_expires;
62803 struct list_head cpu_timers[3];
62804
62805-/* process credentials */
bc901d79 62806- const struct cred __rcu *real_cred; /* objective and real subjective task
58c5fc13 62807- * credentials (COW) */
bc901d79 62808- const struct cred __rcu *cred; /* effective (overridable) subjective task
58c5fc13 62809- * credentials (COW) */
ae4e228f 62810- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
58c5fc13
MT
62811-
62812 char comm[TASK_COMM_LEN]; /* executable name excluding path
62813 - access with [gs]et_task_comm (which lock
62814 it with task_lock())
15a11c5b 62815@@ -1380,8 +1393,16 @@ struct task_struct {
71d190be
MT
62816 #endif
62817 /* CPU-specific state of this task */
bc901d79 62818 struct thread_struct thread;
71d190be
MT
62819+/* thread_info moved to task_struct */
62820+#ifdef CONFIG_X86
62821+ struct thread_info tinfo;
62822+#endif
bc901d79
MT
62823 /* filesystem information */
62824 struct fs_struct *fs;
58c5fc13 62825+
bc901d79 62826+ const struct cred __rcu *cred; /* effective (overridable) subjective task
58c5fc13 62827+ * credentials (COW) */
58c5fc13 62828+
bc901d79
MT
62829 /* open file information */
62830 struct files_struct *files;
62831 /* namespaces */
15a11c5b 62832@@ -1428,6 +1449,11 @@ struct task_struct {
bc901d79
MT
62833 struct rt_mutex_waiter *pi_blocked_on;
62834 #endif
ae4e228f 62835
bc901d79
MT
62836+/* process credentials */
62837+ const struct cred __rcu *real_cred; /* objective and real subjective task
ae4e228f 62838+ * credentials (COW) */
bc901d79 62839+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
ae4e228f 62840+
bc901d79
MT
62841 #ifdef CONFIG_DEBUG_MUTEXES
62842 /* mutex deadlock detection */
62843 struct mutex_waiter *blocked_on;
6e9df6a3 62844@@ -1537,6 +1563,21 @@ struct task_struct {
ae4e228f
MT
62845 unsigned long default_timer_slack_ns;
62846
62847 struct list_head *scm_work_list;
58c5fc13
MT
62848+
62849+#ifdef CONFIG_GRKERNSEC
62850+ /* grsecurity */
df50ba0c 62851+ struct dentry *gr_chroot_dentry;
58c5fc13
MT
62852+ struct acl_subject_label *acl;
62853+ struct acl_role_label *role;
62854+ struct file *exec_file;
62855+ u16 acl_role_id;
16454cff 62856+ /* is this the task that authenticated to the special role */
58c5fc13
MT
62857+ u8 acl_sp_role;
62858+ u8 is_writable;
62859+ u8 brute;
df50ba0c 62860+ u8 gr_is_chrooted;
58c5fc13
MT
62861+#endif
62862+
ae4e228f 62863 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
df50ba0c 62864 /* Index of current stored address in ret_stack */
ae4e228f 62865 int curr_ret_stack;
6e9df6a3 62866@@ -1571,6 +1612,57 @@ struct task_struct {
ae4e228f 62867 #endif
58c5fc13
MT
62868 };
62869
62870+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62871+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62872+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62873+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62874+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62875+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62876+
62877+#ifdef CONFIG_PAX_SOFTMODE
15a11c5b 62878+extern int pax_softmode;
58c5fc13
MT
62879+#endif
62880+
62881+extern int pax_check_flags(unsigned long *);
62882+
62883+/* if tsk != current then task_lock must be held on it */
62884+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62885+static inline unsigned long pax_get_flags(struct task_struct *tsk)
62886+{
62887+ if (likely(tsk->mm))
62888+ return tsk->mm->pax_flags;
62889+ else
62890+ return 0UL;
62891+}
62892+
62893+/* if tsk != current then task_lock must be held on it */
62894+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62895+{
62896+ if (likely(tsk->mm)) {
62897+ tsk->mm->pax_flags = flags;
62898+ return 0;
62899+ }
62900+ return -EINVAL;
62901+}
62902+#endif
62903+
62904+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62905+extern void pax_set_initial_flags(struct linux_binprm *bprm);
62906+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62907+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62908+#endif
62909+
15a11c5b 62910+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
6e9df6a3 62911+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
15a11c5b
MT
62912+extern void pax_report_refcount_overflow(struct pt_regs *regs);
62913+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
66a7e928
MT
62914+
62915+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15a11c5b
MT
62916+extern void pax_track_stack(void);
62917+#else
62918+static inline void pax_track_stack(void) {}
66a7e928 62919+#endif
58c5fc13
MT
62920+
62921 /* Future-safe accessor for struct task_struct's cpus_allowed. */
ae4e228f 62922 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
58c5fc13 62923
6e9df6a3 62924@@ -2074,7 +2166,9 @@ void yield(void);
71d190be
MT
62925 extern struct exec_domain default_exec_domain;
62926
62927 union thread_union {
62928+#ifndef CONFIG_X86
62929 struct thread_info thread_info;
62930+#endif
62931 unsigned long stack[THREAD_SIZE/sizeof(long)];
62932 };
62933
6e9df6a3 62934@@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns;
15a11c5b
MT
62935 */
62936
62937 extern struct task_struct *find_task_by_vpid(pid_t nr);
62938+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62939 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62940 struct pid_namespace *ns);
62941
fe2de317 62942@@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
58c5fc13
MT
62943 extern void exit_itimers(struct signal_struct *);
62944 extern void flush_itimer_signals(void);
62945
62946-extern NORET_TYPE void do_group_exit(int);
62947+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
62948
62949 extern void daemonize(const char *, ...);
62950 extern int allow_signal(int);
fe2de317 62951@@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
58c5fc13
MT
62952
62953 #endif
62954
62955-static inline int object_is_on_stack(void *obj)
ae4e228f 62956+static inline int object_starts_on_stack(void *obj)
58c5fc13 62957 {
ae4e228f
MT
62958- void *stack = task_stack_page(current);
62959+ const void *stack = task_stack_page(current);
58c5fc13 62960
ae4e228f
MT
62961 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62962 }
62963
57199397
MT
62964+#ifdef CONFIG_PAX_USERCOPY
62965+extern int object_is_on_stack(const void *obj, unsigned long len);
62966+#endif
ae4e228f
MT
62967+
62968 extern void thread_info_cache_init(void);
62969
62970 #ifdef CONFIG_DEBUG_STACK_USAGE
fe2de317
MT
62971diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62972index 899fbb4..1cb4138 100644
62973--- a/include/linux/screen_info.h
62974+++ b/include/linux/screen_info.h
ae4e228f 62975@@ -43,7 +43,8 @@ struct screen_info {
58c5fc13
MT
62976 __u16 pages; /* 0x32 */
62977 __u16 vesa_attributes; /* 0x34 */
62978 __u32 capabilities; /* 0x36 */
62979- __u8 _reserved[6]; /* 0x3a */
62980+ __u16 vesapm_size; /* 0x3a */
62981+ __u8 _reserved[4]; /* 0x3c */
62982 } __attribute__((packed));
62983
62984 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
fe2de317
MT
62985diff --git a/include/linux/security.h b/include/linux/security.h
62986index ebd2a53..2d949ae 100644
62987--- a/include/linux/security.h
62988+++ b/include/linux/security.h
66a7e928 62989@@ -36,6 +36,7 @@
58c5fc13
MT
62990 #include <linux/key.h>
62991 #include <linux/xfrm.h>
df50ba0c 62992 #include <linux/slab.h>
58c5fc13
MT
62993+#include <linux/grsecurity.h>
62994 #include <net/flow.h>
62995
62996 /* Maximum number of letters for an LSM name string */
fe2de317
MT
62997diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62998index be720cd..a0e1b94 100644
62999--- a/include/linux/seq_file.h
63000+++ b/include/linux/seq_file.h
6e9df6a3 63001@@ -33,6 +33,7 @@ struct seq_operations {
15a11c5b
MT
63002 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63003 int (*show) (struct seq_file *m, void *v);
63004 };
63005+typedef struct seq_operations __no_const seq_operations_no_const;
63006
63007 #define SEQ_SKIP 1
63008
fe2de317
MT
63009diff --git a/include/linux/shm.h b/include/linux/shm.h
63010index 92808b8..c28cac4 100644
63011--- a/include/linux/shm.h
63012+++ b/include/linux/shm.h
63013@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
15a11c5b 63014
6e9df6a3
MT
63015 /* The task created the shm object. NULL if the task is dead. */
63016 struct task_struct *shm_creator;
58c5fc13
MT
63017+#ifdef CONFIG_GRKERNSEC
63018+ time_t shm_createtime;
63019+ pid_t shm_lapid;
63020+#endif
63021 };
63022
63023 /* shm_mode upper byte flags */
fe2de317
MT
63024diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63025index 0f96646..cfb757a 100644
63026--- a/include/linux/skbuff.h
63027+++ b/include/linux/skbuff.h
63028@@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
bc901d79
MT
63029 */
63030 static inline int skb_queue_empty(const struct sk_buff_head *list)
63031 {
63032- return list->next == (struct sk_buff *)list;
63033+ return list->next == (const struct sk_buff *)list;
63034 }
63035
63036 /**
fe2de317 63037@@ -623,7 +623,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
bc901d79
MT
63038 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63039 const struct sk_buff *skb)
63040 {
63041- return skb->next == (struct sk_buff *)list;
63042+ return skb->next == (const struct sk_buff *)list;
63043 }
63044
63045 /**
fe2de317 63046@@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
bc901d79
MT
63047 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63048 const struct sk_buff *skb)
63049 {
63050- return skb->prev == (struct sk_buff *)list;
63051+ return skb->prev == (const struct sk_buff *)list;
63052 }
63053
63054 /**
fe2de317 63055@@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
8308f9c9
MT
63056 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63057 */
63058 #ifndef NET_SKB_PAD
63059-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
15a11c5b 63060+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
8308f9c9
MT
63061 #endif
63062
63063 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
fe2de317
MT
63064diff --git a/include/linux/slab.h b/include/linux/slab.h
63065index 573c809..e84c132 100644
63066--- a/include/linux/slab.h
63067+++ b/include/linux/slab.h
71d190be 63068@@ -11,12 +11,20 @@
ae4e228f
MT
63069
63070 #include <linux/gfp.h>
63071 #include <linux/types.h>
63072+#include <linux/err.h>
63073
63074 /*
63075 * Flags to pass to kmem_cache_create().
71d190be
MT
63076 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63077 */
63078 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63079+
63080+#ifdef CONFIG_PAX_USERCOPY
63081+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63082+#else
63083+#define SLAB_USERCOPY 0x00000000UL
63084+#endif
63085+
63086 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63087 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63088 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63089@@ -87,10 +95,13 @@
58c5fc13
MT
63090 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63091 * Both make kfree a no-op.
63092 */
63093-#define ZERO_SIZE_PTR ((void *)16)
ae4e228f
MT
63094+#define ZERO_SIZE_PTR \
63095+({ \
63096+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63097+ (void *)(-MAX_ERRNO-1L); \
63098+})
58c5fc13
MT
63099
63100-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63101- (unsigned long)ZERO_SIZE_PTR)
df50ba0c 63102+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
58c5fc13
MT
63103
63104 /*
63105 * struct kmem_cache related prototypes
fe2de317 63106@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
58c5fc13
MT
63107 void kfree(const void *);
63108 void kzfree(const void *);
63109 size_t ksize(const void *);
63110+void check_object_size(const void *ptr, unsigned long n, bool to);
63111
63112 /*
63113 * Allocator specific definitions. These are mainly used to establish optimized
fe2de317 63114@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
58c5fc13
MT
63115
63116 void __init kmem_cache_init_late(void);
63117
66a7e928
MT
63118+#define kmalloc(x, y) \
63119+({ \
63120+ void *___retval; \
63121+ intoverflow_t ___x = (intoverflow_t)x; \
63122+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
63123+ ___retval = NULL; \
63124+ else \
63125+ ___retval = kmalloc((size_t)___x, (y)); \
63126+ ___retval; \
58c5fc13
MT
63127+})
63128+
63129+#define kmalloc_node(x, y, z) \
63130+({ \
63131+ void *___retval; \
63132+ intoverflow_t ___x = (intoverflow_t)x; \
63133+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
63134+ ___retval = NULL; \
63135+ else \
63136+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
63137+ ___retval; \
63138+})
63139+
66a7e928
MT
63140+#define kzalloc(x, y) \
63141+({ \
63142+ void *___retval; \
63143+ intoverflow_t ___x = (intoverflow_t)x; \
63144+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
63145+ ___retval = NULL; \
63146+ else \
63147+ ___retval = kzalloc((size_t)___x, (y)); \
63148+ ___retval; \
63149+})
63150+
63151+#define __krealloc(x, y, z) \
63152+({ \
63153+ void *___retval; \
63154+ intoverflow_t ___y = (intoverflow_t)y; \
63155+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
63156+ ___retval = NULL; \
63157+ else \
63158+ ___retval = __krealloc((x), (size_t)___y, (z)); \
63159+ ___retval; \
63160+})
63161+
63162+#define krealloc(x, y, z) \
63163+({ \
63164+ void *___retval; \
63165+ intoverflow_t ___y = (intoverflow_t)y; \
63166+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
63167+ ___retval = NULL; \
63168+ else \
63169+ ___retval = krealloc((x), (size_t)___y, (z)); \
63170+ ___retval; \
58c5fc13
MT
63171+})
63172+
63173 #endif /* _LINUX_SLAB_H */
fe2de317
MT
63174diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63175index d00e0ba..1b3bf7b 100644
63176--- a/include/linux/slab_def.h
63177+++ b/include/linux/slab_def.h
63178@@ -68,10 +68,10 @@ struct kmem_cache {
63179 unsigned long node_allocs;
63180 unsigned long node_frees;
63181 unsigned long node_overflow;
63182- atomic_t allochit;
63183- atomic_t allocmiss;
63184- atomic_t freehit;
63185- atomic_t freemiss;
63186+ atomic_unchecked_t allochit;
63187+ atomic_unchecked_t allocmiss;
63188+ atomic_unchecked_t freehit;
63189+ atomic_unchecked_t freemiss;
63190
63191 /*
63192 * If debugging is enabled, then the allocator can add additional
63193diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63194index f58d641..c56bf9c 100644
63195--- a/include/linux/slub_def.h
63196+++ b/include/linux/slub_def.h
6e9df6a3 63197@@ -85,7 +85,7 @@ struct kmem_cache {
58c5fc13
MT
63198 struct kmem_cache_order_objects max;
63199 struct kmem_cache_order_objects min;
63200 gfp_t allocflags; /* gfp flags to use on each alloc */
63201- int refcount; /* Refcount for slab cache destroy */
63202+ atomic_t refcount; /* Refcount for slab cache destroy */
63203 void (*ctor)(void *);
63204 int inuse; /* Offset to metadata */
63205 int align; /* Alignment */
fe2de317 63206@@ -211,7 +211,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
15a11c5b
MT
63207 }
63208
63209 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63210-void *__kmalloc(size_t size, gfp_t flags);
63211+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
63212
63213 static __always_inline void *
63214 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
fe2de317
MT
63215diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63216index de8832d..0147b46 100644
63217--- a/include/linux/sonet.h
63218+++ b/include/linux/sonet.h
58c5fc13 63219@@ -61,7 +61,7 @@ struct sonet_stats {
6e9df6a3 63220 #include <linux/atomic.h>
58c5fc13
MT
63221
63222 struct k_sonet_stats {
63223-#define __HANDLE_ITEM(i) atomic_t i
63224+#define __HANDLE_ITEM(i) atomic_unchecked_t i
63225 __SONET_ITEMS
63226 #undef __HANDLE_ITEM
63227 };
fe2de317
MT
63228diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63229index db7bcaf..1aca77e 100644
63230--- a/include/linux/sunrpc/clnt.h
63231+++ b/include/linux/sunrpc/clnt.h
63232@@ -169,9 +169,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
bc901d79
MT
63233 {
63234 switch (sap->sa_family) {
63235 case AF_INET:
63236- return ntohs(((struct sockaddr_in *)sap)->sin_port);
63237+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63238 case AF_INET6:
63239- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63240+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63241 }
63242 return 0;
63243 }
fe2de317 63244@@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
bc901d79
MT
63245 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63246 const struct sockaddr *src)
63247 {
63248- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63249+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63250 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63251
63252 dsin->sin_family = ssin->sin_family;
fe2de317 63253@@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
bc901d79
MT
63254 if (sa->sa_family != AF_INET6)
63255 return 0;
63256
63257- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63258+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63259 }
63260
63261 #endif /* __KERNEL__ */
fe2de317
MT
63262diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63263index e775689..9e206d9 100644
63264--- a/include/linux/sunrpc/sched.h
63265+++ b/include/linux/sunrpc/sched.h
6e9df6a3
MT
63266@@ -105,6 +105,7 @@ struct rpc_call_ops {
63267 void (*rpc_call_done)(struct rpc_task *, void *);
63268 void (*rpc_release)(void *);
63269 };
63270+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63271
63272 struct rpc_task_setup {
63273 struct rpc_task *task;
fe2de317
MT
63274diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63275index c14fe86..393245e 100644
63276--- a/include/linux/sunrpc/svc_rdma.h
63277+++ b/include/linux/sunrpc/svc_rdma.h
8308f9c9
MT
63278@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63279 extern unsigned int svcrdma_max_requests;
63280 extern unsigned int svcrdma_max_req_size;
63281
63282-extern atomic_t rdma_stat_recv;
63283-extern atomic_t rdma_stat_read;
63284-extern atomic_t rdma_stat_write;
63285-extern atomic_t rdma_stat_sq_starve;
63286-extern atomic_t rdma_stat_rq_starve;
63287-extern atomic_t rdma_stat_rq_poll;
63288-extern atomic_t rdma_stat_rq_prod;
63289-extern atomic_t rdma_stat_sq_poll;
63290-extern atomic_t rdma_stat_sq_prod;
63291+extern atomic_unchecked_t rdma_stat_recv;
63292+extern atomic_unchecked_t rdma_stat_read;
63293+extern atomic_unchecked_t rdma_stat_write;
63294+extern atomic_unchecked_t rdma_stat_sq_starve;
63295+extern atomic_unchecked_t rdma_stat_rq_starve;
63296+extern atomic_unchecked_t rdma_stat_rq_poll;
63297+extern atomic_unchecked_t rdma_stat_rq_prod;
63298+extern atomic_unchecked_t rdma_stat_sq_poll;
63299+extern atomic_unchecked_t rdma_stat_sq_prod;
63300
63301 #define RPCRDMA_VERSION 1
63302
fe2de317
MT
63303diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63304index 11684d9..0d245eb 100644
63305--- a/include/linux/sysctl.h
63306+++ b/include/linux/sysctl.h
ae4e228f 63307@@ -155,7 +155,11 @@ enum
58c5fc13
MT
63308 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63309 };
63310
63311-
63312+#ifdef CONFIG_PAX_SOFTMODE
63313+enum {
63314+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63315+};
63316+#endif
63317
63318 /* CTL_VM names: */
63319 enum
fe2de317 63320@@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
bc901d79
MT
63321
63322 extern int proc_dostring(struct ctl_table *, int,
63323 void __user *, size_t *, loff_t *);
63324+extern int proc_dostring_modpriv(struct ctl_table *, int,
63325+ void __user *, size_t *, loff_t *);
63326 extern int proc_dointvec(struct ctl_table *, int,
63327 void __user *, size_t *, loff_t *);
63328 extern int proc_dointvec_minmax(struct ctl_table *, int,
fe2de317
MT
63329diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63330index ff7dc08..893e1bd 100644
63331--- a/include/linux/tty_ldisc.h
63332+++ b/include/linux/tty_ldisc.h
16454cff 63333@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
58c5fc13
MT
63334
63335 struct module *owner;
63336
63337- int refcount;
63338+ atomic_t refcount;
63339 };
63340
63341 struct tty_ldisc {
fe2de317
MT
63342diff --git a/include/linux/types.h b/include/linux/types.h
63343index 176da8c..e45e473 100644
63344--- a/include/linux/types.h
63345+++ b/include/linux/types.h
66a7e928 63346@@ -213,10 +213,26 @@ typedef struct {
57199397 63347 int counter;
58c5fc13
MT
63348 } atomic_t;
63349
63350+#ifdef CONFIG_PAX_REFCOUNT
63351+typedef struct {
57199397 63352+ int counter;
58c5fc13
MT
63353+} atomic_unchecked_t;
63354+#else
63355+typedef atomic_t atomic_unchecked_t;
63356+#endif
63357+
63358 #ifdef CONFIG_64BIT
63359 typedef struct {
57199397 63360 long counter;
58c5fc13
MT
63361 } atomic64_t;
63362+
63363+#ifdef CONFIG_PAX_REFCOUNT
63364+typedef struct {
57199397 63365+ long counter;
58c5fc13
MT
63366+} atomic64_unchecked_t;
63367+#else
63368+typedef atomic64_t atomic64_unchecked_t;
63369+#endif
63370 #endif
63371
6892158b 63372 struct list_head {
fe2de317
MT
63373diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63374index 5ca0951..ab496a5 100644
63375--- a/include/linux/uaccess.h
63376+++ b/include/linux/uaccess.h
63377@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
58c5fc13
MT
63378 long ret; \
63379 mm_segment_t old_fs = get_fs(); \
63380 \
63381- set_fs(KERNEL_DS); \
63382 pagefault_disable(); \
6e9df6a3 63383- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
58c5fc13 63384- pagefault_enable(); \
6e9df6a3
MT
63385+ set_fs(KERNEL_DS); \
63386+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
58c5fc13
MT
63387 set_fs(old_fs); \
63388+ pagefault_enable(); \
63389 ret; \
63390 })
63391
fe2de317
MT
63392diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63393index 99c1b4d..bb94261 100644
63394--- a/include/linux/unaligned/access_ok.h
63395+++ b/include/linux/unaligned/access_ok.h
bc901d79
MT
63396@@ -6,32 +6,32 @@
63397
63398 static inline u16 get_unaligned_le16(const void *p)
63399 {
63400- return le16_to_cpup((__le16 *)p);
63401+ return le16_to_cpup((const __le16 *)p);
63402 }
63403
63404 static inline u32 get_unaligned_le32(const void *p)
63405 {
63406- return le32_to_cpup((__le32 *)p);
63407+ return le32_to_cpup((const __le32 *)p);
63408 }
63409
63410 static inline u64 get_unaligned_le64(const void *p)
63411 {
63412- return le64_to_cpup((__le64 *)p);
63413+ return le64_to_cpup((const __le64 *)p);
63414 }
63415
63416 static inline u16 get_unaligned_be16(const void *p)
63417 {
63418- return be16_to_cpup((__be16 *)p);
63419+ return be16_to_cpup((const __be16 *)p);
63420 }
63421
63422 static inline u32 get_unaligned_be32(const void *p)
63423 {
63424- return be32_to_cpup((__be32 *)p);
63425+ return be32_to_cpup((const __be32 *)p);
63426 }
63427
63428 static inline u64 get_unaligned_be64(const void *p)
63429 {
63430- return be64_to_cpup((__be64 *)p);
63431+ return be64_to_cpup((const __be64 *)p);
63432 }
63433
63434 static inline void put_unaligned_le16(u16 val, void *p)
fe2de317
MT
63435diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63436index cf97b5b..40ebc87 100644
63437--- a/include/linux/vermagic.h
63438+++ b/include/linux/vermagic.h
6e9df6a3
MT
63439@@ -26,9 +26,35 @@
63440 #define MODULE_ARCH_VERMAGIC ""
63441 #endif
63442
63443+#ifdef CONFIG_PAX_REFCOUNT
63444+#define MODULE_PAX_REFCOUNT "REFCOUNT "
63445+#else
63446+#define MODULE_PAX_REFCOUNT ""
63447+#endif
63448+
63449+#ifdef CONSTIFY_PLUGIN
63450+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63451+#else
63452+#define MODULE_CONSTIFY_PLUGIN ""
63453+#endif
63454+
63455+#ifdef STACKLEAK_PLUGIN
63456+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63457+#else
63458+#define MODULE_STACKLEAK_PLUGIN ""
63459+#endif
63460+
63461+#ifdef CONFIG_GRKERNSEC
63462+#define MODULE_GRSEC "GRSEC "
63463+#else
63464+#define MODULE_GRSEC ""
63465+#endif
63466+
63467 #define VERMAGIC_STRING \
63468 UTS_RELEASE " " \
63469 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63470 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63471- MODULE_ARCH_VERMAGIC
63472+ MODULE_ARCH_VERMAGIC \
63473+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63474+ MODULE_GRSEC
63475
fe2de317
MT
63476diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63477index 687fb11..b342358 100644
63478--- a/include/linux/vmalloc.h
63479+++ b/include/linux/vmalloc.h
63480@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
58c5fc13
MT
63481 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63482 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
6e9df6a3 63483 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
58c5fc13 63484+
df50ba0c 63485+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
6e9df6a3 63486+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
58c5fc13
MT
63487+#endif
63488+
63489 /* bits [20..32] reserved for arch specific ioremap internals */
63490
63491 /*
fe2de317 63492@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
66a7e928 63493 # endif
bc901d79 63494 #endif
58c5fc13
MT
63495
63496+#define vmalloc(x) \
63497+({ \
63498+ void *___retval; \
63499+ intoverflow_t ___x = (intoverflow_t)x; \
63500+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
63501+ ___retval = NULL; \
63502+ else \
63503+ ___retval = vmalloc((unsigned long)___x); \
63504+ ___retval; \
63505+})
63506+
bc901d79
MT
63507+#define vzalloc(x) \
63508+({ \
63509+ void *___retval; \
63510+ intoverflow_t ___x = (intoverflow_t)x; \
63511+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
63512+ ___retval = NULL; \
63513+ else \
63514+ ___retval = vzalloc((unsigned long)___x); \
63515+ ___retval; \
63516+})
63517+
58c5fc13
MT
63518+#define __vmalloc(x, y, z) \
63519+({ \
63520+ void *___retval; \
63521+ intoverflow_t ___x = (intoverflow_t)x; \
63522+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
63523+ ___retval = NULL; \
63524+ else \
63525+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
63526+ ___retval; \
63527+})
63528+
63529+#define vmalloc_user(x) \
63530+({ \
63531+ void *___retval; \
63532+ intoverflow_t ___x = (intoverflow_t)x; \
63533+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
63534+ ___retval = NULL; \
63535+ else \
63536+ ___retval = vmalloc_user((unsigned long)___x); \
63537+ ___retval; \
63538+})
63539+
63540+#define vmalloc_exec(x) \
63541+({ \
63542+ void *___retval; \
63543+ intoverflow_t ___x = (intoverflow_t)x; \
63544+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
63545+ ___retval = NULL; \
63546+ else \
63547+ ___retval = vmalloc_exec((unsigned long)___x); \
63548+ ___retval; \
63549+})
63550+
63551+#define vmalloc_node(x, y) \
63552+({ \
63553+ void *___retval; \
63554+ intoverflow_t ___x = (intoverflow_t)x; \
63555+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
63556+ ___retval = NULL; \
63557+ else \
63558+ ___retval = vmalloc_node((unsigned long)___x, (y));\
63559+ ___retval; \
63560+})
63561+
bc901d79
MT
63562+#define vzalloc_node(x, y) \
63563+({ \
63564+ void *___retval; \
63565+ intoverflow_t ___x = (intoverflow_t)x; \
63566+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
63567+ ___retval = NULL; \
63568+ else \
63569+ ___retval = vzalloc_node((unsigned long)___x, (y));\
63570+ ___retval; \
63571+})
63572+
58c5fc13
MT
63573+#define vmalloc_32(x) \
63574+({ \
63575+ void *___retval; \
63576+ intoverflow_t ___x = (intoverflow_t)x; \
63577+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
63578+ ___retval = NULL; \
63579+ else \
63580+ ___retval = vmalloc_32((unsigned long)___x); \
63581+ ___retval; \
63582+})
63583+
63584+#define vmalloc_32_user(x) \
63585+({ \
bc901d79 63586+ void *___retval; \
58c5fc13
MT
63587+ intoverflow_t ___x = (intoverflow_t)x; \
63588+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
63589+ ___retval = NULL; \
63590+ else \
63591+ ___retval = vmalloc_32_user((unsigned long)___x);\
63592+ ___retval; \
63593+})
63594+
63595 #endif /* _LINUX_VMALLOC_H */
fe2de317
MT
63596diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63597index 65efb92..137adbb 100644
63598--- a/include/linux/vmstat.h
63599+++ b/include/linux/vmstat.h
63600@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
57199397
MT
63601 /*
63602 * Zone based page accounting with per cpu differentials.
63603 */
63604-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63605+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63606
63607 static inline void zone_page_state_add(long x, struct zone *zone,
63608 enum zone_stat_item item)
63609 {
63610- atomic_long_add(x, &zone->vm_stat[item]);
63611- atomic_long_add(x, &vm_stat[item]);
63612+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63613+ atomic_long_add_unchecked(x, &vm_stat[item]);
63614 }
63615
63616 static inline unsigned long global_page_state(enum zone_stat_item item)
63617 {
63618- long x = atomic_long_read(&vm_stat[item]);
63619+ long x = atomic_long_read_unchecked(&vm_stat[item]);
63620 #ifdef CONFIG_SMP
63621 if (x < 0)
63622 x = 0;
fe2de317 63623@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
57199397
MT
63624 static inline unsigned long zone_page_state(struct zone *zone,
63625 enum zone_stat_item item)
63626 {
63627- long x = atomic_long_read(&zone->vm_stat[item]);
63628+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63629 #ifdef CONFIG_SMP
63630 if (x < 0)
63631 x = 0;
fe2de317 63632@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
6892158b
MT
63633 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63634 enum zone_stat_item item)
63635 {
63636- long x = atomic_long_read(&zone->vm_stat[item]);
63637+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63638
63639 #ifdef CONFIG_SMP
63640 int cpu;
fe2de317 63641@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
57199397
MT
63642
63643 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63644 {
63645- atomic_long_inc(&zone->vm_stat[item]);
63646- atomic_long_inc(&vm_stat[item]);
63647+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
63648+ atomic_long_inc_unchecked(&vm_stat[item]);
63649 }
63650
63651 static inline void __inc_zone_page_state(struct page *page,
fe2de317 63652@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
57199397
MT
63653
63654 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63655 {
63656- atomic_long_dec(&zone->vm_stat[item]);
63657- atomic_long_dec(&vm_stat[item]);
63658+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
63659+ atomic_long_dec_unchecked(&vm_stat[item]);
63660 }
63661
63662 static inline void __dec_zone_page_state(struct page *page,
fe2de317
MT
63663diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63664index 4aeff96..b378cdc 100644
63665--- a/include/media/saa7146_vv.h
63666+++ b/include/media/saa7146_vv.h
15a11c5b
MT
63667@@ -163,7 +163,7 @@ struct saa7146_ext_vv
63668 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63669
63670 /* the extension can override this */
63671- struct v4l2_ioctl_ops ops;
63672+ v4l2_ioctl_ops_no_const ops;
63673 /* pointer to the saa7146 core ops */
63674 const struct v4l2_ioctl_ops *core_ops;
63675
fe2de317
MT
63676diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63677index c7c40f1..4f01585 100644
63678--- a/include/media/v4l2-dev.h
63679+++ b/include/media/v4l2-dev.h
63680@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
6e9df6a3
MT
63681
63682
63683 struct v4l2_file_operations {
63684- struct module *owner;
63685+ struct module * const owner;
63686 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63687 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63688 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63689@@ -68,6 +68,7 @@ struct v4l2_file_operations {
63690 int (*open) (struct file *);
63691 int (*release) (struct file *);
63692 };
63693+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63694
63695 /*
63696 * Newer version of video_device, handled by videodev2.c
fe2de317
MT
63697diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63698index dd9f1e7..8c4dd86 100644
63699--- a/include/media/v4l2-ioctl.h
63700+++ b/include/media/v4l2-ioctl.h
6e9df6a3 63701@@ -272,7 +272,7 @@ struct v4l2_ioctl_ops {
15a11c5b
MT
63702 long (*vidioc_default) (struct file *file, void *fh,
63703 bool valid_prio, int cmd, void *arg);
63704 };
6e9df6a3 63705-
15a11c5b
MT
63706+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63707
15a11c5b 63708 /* v4l debugging and diagnostics */
6e9df6a3 63709
fe2de317
MT
63710diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63711index c5dedd8..a93b07b 100644
63712--- a/include/net/caif/caif_hsi.h
63713+++ b/include/net/caif/caif_hsi.h
6e9df6a3
MT
63714@@ -94,7 +94,7 @@ struct cfhsi_drv {
63715 void (*rx_done_cb) (struct cfhsi_drv *drv);
63716 void (*wake_up_cb) (struct cfhsi_drv *drv);
63717 void (*wake_down_cb) (struct cfhsi_drv *drv);
63718-};
63719+} __no_const;
63720
63721 /* Structure implemented by HSI device. */
63722 struct cfhsi_dev {
fe2de317
MT
63723diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63724index 9e5425b..8136ffc 100644
63725--- a/include/net/caif/cfctrl.h
63726+++ b/include/net/caif/cfctrl.h
15a11c5b
MT
63727@@ -52,7 +52,7 @@ struct cfctrl_rsp {
63728 void (*radioset_rsp)(void);
63729 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63730 struct cflayer *client_layer);
63731-};
63732+} __no_const;
63733
63734 /* Link Setup Parameters for CAIF-Links. */
63735 struct cfctrl_link_param {
8308f9c9
MT
63736@@ -101,8 +101,8 @@ struct cfctrl_request_info {
63737 struct cfctrl {
63738 struct cfsrvl serv;
63739 struct cfctrl_rsp res;
63740- atomic_t req_seq_no;
63741- atomic_t rsp_seq_no;
63742+ atomic_unchecked_t req_seq_no;
63743+ atomic_unchecked_t rsp_seq_no;
63744 struct list_head list;
63745 /* Protects from simultaneous access to first_req list */
63746 spinlock_t info_list_lock;
fe2de317
MT
63747diff --git a/include/net/flow.h b/include/net/flow.h
63748index a094477..bc91db1 100644
63749--- a/include/net/flow.h
63750+++ b/include/net/flow.h
63751@@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_cache_lookup(
8308f9c9
MT
63752 u8 dir, flow_resolve_t resolver, void *ctx);
63753
63754 extern void flow_cache_flush(void);
63755-extern atomic_t flow_cache_genid;
63756+extern atomic_unchecked_t flow_cache_genid;
63757
66a7e928 63758 #endif
fe2de317
MT
63759diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63760index e9ff3fc..9d3e5c7 100644
63761--- a/include/net/inetpeer.h
63762+++ b/include/net/inetpeer.h
63763@@ -48,8 +48,8 @@ struct inet_peer {
6892158b
MT
63764 */
63765 union {
63766 struct {
66a7e928
MT
63767- atomic_t rid; /* Frag reception counter */
63768- atomic_t ip_id_count; /* IP ID for the next packet */
63769+ atomic_unchecked_t rid; /* Frag reception counter */
63770+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63771 __u32 tcp_ts;
63772 __u32 tcp_ts_stamp;
6e9df6a3 63773 };
fe2de317 63774@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
6892158b
MT
63775 more++;
63776 inet_peer_refcheck(p);
6e9df6a3
MT
63777 do {
63778- old = atomic_read(&p->ip_id_count);
63779+ old = atomic_read_unchecked(&p->ip_id_count);
63780 new = old + more;
63781 if (!new)
63782 new = 1;
63783- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63784+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63785 return new;
63786 }
63787
fe2de317
MT
63788diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63789index 10422ef..662570f 100644
63790--- a/include/net/ip_fib.h
63791+++ b/include/net/ip_fib.h
63792@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
66a7e928
MT
63793
63794 #define FIB_RES_SADDR(net, res) \
63795 ((FIB_RES_NH(res).nh_saddr_genid == \
63796- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63797+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63798 FIB_RES_NH(res).nh_saddr : \
63799 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63800 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
fe2de317
MT
63801diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63802index 8fa4430..05dd772 100644
63803--- a/include/net/ip_vs.h
63804+++ b/include/net/ip_vs.h
15a11c5b 63805@@ -509,7 +509,7 @@ struct ip_vs_conn {
8308f9c9
MT
63806 struct ip_vs_conn *control; /* Master control connection */
63807 atomic_t n_control; /* Number of controlled ones */
63808 struct ip_vs_dest *dest; /* real server */
63809- atomic_t in_pkts; /* incoming packet counter */
63810+ atomic_unchecked_t in_pkts; /* incoming packet counter */
63811
63812 /* packet transmitter for different forwarding methods. If it
63813 mangles the packet, it must return NF_DROP or better NF_STOLEN,
15a11c5b 63814@@ -647,7 +647,7 @@ struct ip_vs_dest {
8308f9c9 63815 __be16 port; /* port number of the server */
66a7e928 63816 union nf_inet_addr addr; /* IP address of the server */
8308f9c9
MT
63817 volatile unsigned flags; /* dest status flags */
63818- atomic_t conn_flags; /* flags to copy to conn */
63819+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
63820 atomic_t weight; /* server weight */
63821
63822 atomic_t refcnt; /* reference counter */
fe2de317
MT
63823diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63824index 69b610a..fe3962c 100644
63825--- a/include/net/irda/ircomm_core.h
63826+++ b/include/net/irda/ircomm_core.h
15a11c5b
MT
63827@@ -51,7 +51,7 @@ typedef struct {
63828 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63829 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63830 struct ircomm_info *);
63831-} call_t;
63832+} __no_const call_t;
63833
63834 struct ircomm_cb {
63835 irda_queue_t queue;
fe2de317
MT
63836diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63837index 59ba38bc..d515662 100644
63838--- a/include/net/irda/ircomm_tty.h
63839+++ b/include/net/irda/ircomm_tty.h
c52201e0
MT
63840@@ -35,6 +35,7 @@
63841 #include <linux/termios.h>
63842 #include <linux/timer.h>
63843 #include <linux/tty.h> /* struct tty_struct */
63844+#include <asm/local.h>
63845
63846 #include <net/irda/irias_object.h>
63847 #include <net/irda/ircomm_core.h>
63848@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58c5fc13
MT
63849 unsigned short close_delay;
63850 unsigned short closing_wait; /* time to wait before closing */
63851
63852- int open_count;
63853- int blocked_open; /* # of blocked opens */
c52201e0
MT
63854+ local_t open_count;
63855+ local_t blocked_open; /* # of blocked opens */
58c5fc13
MT
63856
63857 /* Protect concurent access to :
63858 * o self->open_count
fe2de317
MT
63859diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63860index f82a1e8..82d81e8 100644
63861--- a/include/net/iucv/af_iucv.h
63862+++ b/include/net/iucv/af_iucv.h
8308f9c9
MT
63863@@ -87,7 +87,7 @@ struct iucv_sock {
63864 struct iucv_sock_list {
63865 struct hlist_head head;
63866 rwlock_t lock;
63867- atomic_t autobind_name;
63868+ atomic_unchecked_t autobind_name;
63869 };
63870
63871 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
fe2de317
MT
63872diff --git a/include/net/lapb.h b/include/net/lapb.h
63873index 96cb5dd..25e8d4f 100644
63874--- a/include/net/lapb.h
63875+++ b/include/net/lapb.h
15a11c5b
MT
63876@@ -95,7 +95,7 @@ struct lapb_cb {
63877 struct sk_buff_head write_queue;
63878 struct sk_buff_head ack_queue;
63879 unsigned char window;
63880- struct lapb_register_struct callbacks;
63881+ struct lapb_register_struct *callbacks;
63882
63883 /* FRMR control information */
63884 struct lapb_frame frmr_data;
fe2de317
MT
63885diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63886index 2720884..3aa5c25 100644
63887--- a/include/net/neighbour.h
63888+++ b/include/net/neighbour.h
6e9df6a3
MT
63889@@ -122,7 +122,7 @@ struct neigh_ops {
63890 void (*error_report)(struct neighbour *, struct sk_buff *);
63891 int (*output)(struct neighbour *, struct sk_buff *);
63892 int (*connected_output)(struct neighbour *, struct sk_buff *);
15a11c5b
MT
63893-};
63894+} __do_const;
ae4e228f
MT
63895
63896 struct pneigh_entry {
15a11c5b 63897 struct pneigh_entry *next;
fe2de317
MT
63898diff --git a/include/net/netlink.h b/include/net/netlink.h
63899index 98c1854..d4add7b 100644
63900--- a/include/net/netlink.h
63901+++ b/include/net/netlink.h
63902@@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
bc901d79
MT
63903 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63904 {
63905 if (mark)
63906- skb_trim(skb, (unsigned char *) mark - skb->data);
63907+ skb_trim(skb, (const unsigned char *) mark - skb->data);
63908 }
63909
63910 /**
fe2de317
MT
63911diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63912index d786b4f..4c3dd41 100644
63913--- a/include/net/netns/ipv4.h
63914+++ b/include/net/netns/ipv4.h
15a11c5b
MT
63915@@ -56,8 +56,8 @@ struct netns_ipv4 {
63916
63917 unsigned int sysctl_ping_group_range[2];
8308f9c9
MT
63918
63919- atomic_t rt_genid;
66a7e928 63920- atomic_t dev_addr_genid;
8308f9c9 63921+ atomic_unchecked_t rt_genid;
66a7e928 63922+ atomic_unchecked_t dev_addr_genid;
8308f9c9
MT
63923
63924 #ifdef CONFIG_IP_MROUTE
63925 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
fe2de317
MT
63926diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63927index 6a72a58..e6a127d 100644
63928--- a/include/net/sctp/sctp.h
63929+++ b/include/net/sctp/sctp.h
6e9df6a3 63930@@ -318,9 +318,9 @@ do { \
58c5fc13
MT
63931
63932 #else /* SCTP_DEBUG */
63933
63934-#define SCTP_DEBUG_PRINTK(whatever...)
bc901d79 63935-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
58c5fc13
MT
63936-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63937+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
bc901d79 63938+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
58c5fc13
MT
63939+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63940 #define SCTP_ENABLE_DEBUG
63941 #define SCTP_DISABLE_DEBUG
63942 #define SCTP_ASSERT(expr, str, func)
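Giving the empty debug macros a do {} while (0) body is a purely defensive change: the expansion is still a no-op, but it remains a single well-formed statement, so uses like the following stay clean and avoid -Wempty-body style warnings (illustrative use, not from the patch):

    /* With a truly empty expansion the if body would reduce to a bare ';'. */
    if (err)
            SCTP_DEBUG_PRINTK("error %d\n", err);
    else
            SCTP_DEBUG_PRINTK("ok\n");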
fe2de317
MT
63943diff --git a/include/net/sock.h b/include/net/sock.h
63944index 8e4062f..77b041e 100644
63945--- a/include/net/sock.h
63946+++ b/include/net/sock.h
6e9df6a3 63947@@ -278,7 +278,7 @@ struct sock {
8308f9c9
MT
63948 #ifdef CONFIG_RPS
63949 __u32 sk_rxhash;
63950 #endif
63951- atomic_t sk_drops;
63952+ atomic_unchecked_t sk_drops;
63953 int sk_rcvbuf;
63954
63955 struct sk_filter __rcu *sk_filter;
fe2de317 63956@@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
15a11c5b
MT
63957 }
63958
63959 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63960- char __user *from, char *to,
63961+ char __user *from, unsigned char *to,
63962 int copy, int offset)
63963 {
63964 if (skb->ip_summed == CHECKSUM_NONE) {
fe2de317
MT
63965diff --git a/include/net/tcp.h b/include/net/tcp.h
63966index acc620a..f4d99c6 100644
63967--- a/include/net/tcp.h
63968+++ b/include/net/tcp.h
6e9df6a3 63969@@ -1401,8 +1401,8 @@ enum tcp_seq_states {
ae4e228f
MT
63970 struct tcp_seq_afinfo {
63971 char *name;
63972 sa_family_t family;
16454cff 63973- struct file_operations seq_fops;
15a11c5b
MT
63974- struct seq_operations seq_ops;
63975+ file_operations_no_const seq_fops;
63976+ seq_operations_no_const seq_ops;
ae4e228f 63977 };
16454cff 63978
15a11c5b 63979 struct tcp_iter_state {
fe2de317
MT
63980diff --git a/include/net/udp.h b/include/net/udp.h
63981index 67ea6fc..e42aee8 100644
63982--- a/include/net/udp.h
63983+++ b/include/net/udp.h
15a11c5b 63984@@ -234,8 +234,8 @@ struct udp_seq_afinfo {
ae4e228f
MT
63985 char *name;
63986 sa_family_t family;
63987 struct udp_table *udp_table;
16454cff 63988- struct file_operations seq_fops;
15a11c5b
MT
63989- struct seq_operations seq_ops;
63990+ file_operations_no_const seq_fops;
63991+ seq_operations_no_const seq_ops;
ae4e228f 63992 };
16454cff 63993
15a11c5b 63994 struct udp_iter_state {
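file_operations_no_const and seq_operations_no_const are typedefs this patch is assumed to add next to the original structures; they carry the __no_const marker so that tcp_seq_afinfo and udp_seq_afinfo, which are filled in at registration time, stay writable while ordinary struct file_operations/seq_operations instances get constified:

    /* Assumed form of the typedefs added elsewhere in this patch. */
    typedef struct file_operations __no_const file_operations_no_const;
    typedef struct seq_operations  __no_const seq_operations_no_const;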
fe2de317
MT
63995diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63996index b203e14..1df3991 100644
63997--- a/include/net/xfrm.h
63998+++ b/include/net/xfrm.h
66a7e928 63999@@ -505,7 +505,7 @@ struct xfrm_policy {
8308f9c9
MT
64000 struct timer_list timer;
64001
64002 struct flow_cache_object flo;
64003- atomic_t genid;
64004+ atomic_unchecked_t genid;
64005 u32 priority;
64006 u32 index;
64007 struct xfrm_mark mark;
fe2de317
MT
64008diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64009index 2d0191c..a55797d 100644
64010--- a/include/rdma/iw_cm.h
64011+++ b/include/rdma/iw_cm.h
15a11c5b
MT
64012@@ -120,7 +120,7 @@ struct iw_cm_verbs {
64013 int backlog);
64014
64015 int (*destroy_listen)(struct iw_cm_id *cm_id);
64016-};
64017+} __no_const;
64018
64019 /**
64020 * iw_create_cm_id - Create an IW CM identifier.
fe2de317
MT
64021diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64022index 7d96829..4ba78d3 100644
64023--- a/include/scsi/libfc.h
64024+++ b/include/scsi/libfc.h
6e9df6a3 64025@@ -758,6 +758,7 @@ struct libfc_function_template {
15a11c5b
MT
64026 */
64027 void (*disc_stop_final) (struct fc_lport *);
64028 };
64029+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64030
64031 /**
64032 * struct fc_disc - Discovery context
6e9df6a3 64033@@ -861,7 +862,7 @@ struct fc_lport {
15a11c5b
MT
64034 struct fc_vport *vport;
64035
64036 /* Operational Information */
64037- struct libfc_function_template tt;
64038+ libfc_function_template_no_const tt;
64039 u8 link_up;
64040 u8 qfull;
64041 enum fc_lport_state state;
fe2de317
MT
64042diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64043index d371c3c..e228a8c 100644
64044--- a/include/scsi/scsi_device.h
64045+++ b/include/scsi/scsi_device.h
8308f9c9
MT
64046@@ -161,9 +161,9 @@ struct scsi_device {
64047 unsigned int max_device_blocked; /* what device_blocked counts down from */
64048 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64049
64050- atomic_t iorequest_cnt;
64051- atomic_t iodone_cnt;
64052- atomic_t ioerr_cnt;
64053+ atomic_unchecked_t iorequest_cnt;
64054+ atomic_unchecked_t iodone_cnt;
64055+ atomic_unchecked_t ioerr_cnt;
64056
64057 struct device sdev_gendev,
64058 sdev_dev;
fe2de317
MT
64059diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64060index 2a65167..91e01f8 100644
64061--- a/include/scsi/scsi_transport_fc.h
64062+++ b/include/scsi/scsi_transport_fc.h
15a11c5b
MT
64063@@ -711,7 +711,7 @@ struct fc_function_template {
64064 unsigned long show_host_system_hostname:1;
66a7e928 64065
15a11c5b
MT
64066 unsigned long disable_target_scan:1;
64067-};
64068+} __do_const;
66a7e928 64069
66a7e928 64070
15a11c5b 64071 /**
fe2de317
MT
64072diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64073index 030b87c..98a6954 100644
64074--- a/include/sound/ak4xxx-adda.h
64075+++ b/include/sound/ak4xxx-adda.h
15a11c5b
MT
64076@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64077 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64078 unsigned char val);
64079 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64080-};
64081+} __no_const;
64082
64083 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64084
fe2de317
MT
64085diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64086index 8c05e47..2b5df97 100644
64087--- a/include/sound/hwdep.h
64088+++ b/include/sound/hwdep.h
15a11c5b
MT
64089@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64090 struct snd_hwdep_dsp_status *status);
64091 int (*dsp_load)(struct snd_hwdep *hw,
64092 struct snd_hwdep_dsp_image *image);
64093-};
64094+} __no_const;
64095
64096 struct snd_hwdep {
64097 struct snd_card *card;
fe2de317
MT
64098diff --git a/include/sound/info.h b/include/sound/info.h
64099index 4e94cf1..76748b1 100644
64100--- a/include/sound/info.h
64101+++ b/include/sound/info.h
15a11c5b
MT
64102@@ -44,7 +44,7 @@ struct snd_info_entry_text {
64103 struct snd_info_buffer *buffer);
64104 void (*write)(struct snd_info_entry *entry,
64105 struct snd_info_buffer *buffer);
64106-};
64107+} __no_const;
64108
64109 struct snd_info_entry_ops {
64110 int (*open)(struct snd_info_entry *entry,
fe2de317
MT
64111diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64112index 57e71fa..a2c7534 100644
64113--- a/include/sound/pcm.h
64114+++ b/include/sound/pcm.h
15a11c5b
MT
64115@@ -81,6 +81,7 @@ struct snd_pcm_ops {
64116 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64117 int (*ack)(struct snd_pcm_substream *substream);
64118 };
64119+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
66a7e928 64120
15a11c5b
MT
64121 /*
64122 *
fe2de317
MT
64123diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64124index af1b49e..a5d55a5 100644
64125--- a/include/sound/sb16_csp.h
64126+++ b/include/sound/sb16_csp.h
15a11c5b
MT
64127@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64128 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64129 int (*csp_stop) (struct snd_sb_csp * p);
64130 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64131-};
64132+} __no_const;
66a7e928 64133
15a11c5b
MT
64134 /*
64135 * CSP private data
fe2de317
MT
64136diff --git a/include/sound/soc.h b/include/sound/soc.h
64137index aa19f5a..a5b8208 100644
64138--- a/include/sound/soc.h
64139+++ b/include/sound/soc.h
6e9df6a3
MT
64140@@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
64141 /* platform IO - used for platform DAPM */
64142 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64143 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
15a11c5b
MT
64144-};
64145+} __do_const;
64146
64147 struct snd_soc_platform {
64148 const char *name;
fe2de317
MT
64149diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64150index 444cd6b..3327cc5 100644
64151--- a/include/sound/ymfpci.h
64152+++ b/include/sound/ymfpci.h
8308f9c9
MT
64153@@ -358,7 +358,7 @@ struct snd_ymfpci {
64154 spinlock_t reg_lock;
64155 spinlock_t voice_lock;
64156 wait_queue_head_t interrupt_sleep;
64157- atomic_t interrupt_sleep_count;
64158+ atomic_unchecked_t interrupt_sleep_count;
64159 struct snd_info_entry *proc_entry;
64160 const struct firmware *dsp_microcode;
64161 const struct firmware *controller_microcode;
fe2de317
MT
64162diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64163index 2704065..e10f3ef 100644
64164--- a/include/target/target_core_base.h
64165+++ b/include/target/target_core_base.h
6e9df6a3 64166@@ -356,7 +356,7 @@ struct t10_reservation_ops {
15a11c5b
MT
64167 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64168 int (*t10_pr_register)(struct se_cmd *);
64169 int (*t10_pr_clear)(struct se_cmd *);
64170-};
64171+} __no_const;
64172
6e9df6a3 64173 struct t10_reservation {
15a11c5b 64174 /* Reservation effects all target ports */
6e9df6a3 64175@@ -496,8 +496,8 @@ struct se_cmd {
8308f9c9
MT
64176 atomic_t t_task_cdbs_left;
64177 atomic_t t_task_cdbs_ex_left;
64178 atomic_t t_task_cdbs_timeout_left;
64179- atomic_t t_task_cdbs_sent;
64180- atomic_t t_transport_aborted;
64181+ atomic_unchecked_t t_task_cdbs_sent;
64182+ atomic_unchecked_t t_transport_aborted;
64183 atomic_t t_transport_active;
64184 atomic_t t_transport_complete;
64185 atomic_t t_transport_queue_active;
6e9df6a3 64186@@ -744,7 +744,7 @@ struct se_device {
8308f9c9
MT
64187 atomic_t active_cmds;
64188 atomic_t simple_cmds;
64189 atomic_t depth_left;
64190- atomic_t dev_ordered_id;
64191+ atomic_unchecked_t dev_ordered_id;
64192 atomic_t dev_tur_active;
64193 atomic_t execute_tasks;
64194 atomic_t dev_status_thr_count;
fe2de317
MT
64195diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64196index 1c09820..7f5ec79 100644
64197--- a/include/trace/events/irq.h
64198+++ b/include/trace/events/irq.h
bc901d79 64199@@ -36,7 +36,7 @@ struct softirq_action;
ae4e228f
MT
64200 */
64201 TRACE_EVENT(irq_handler_entry,
64202
64203- TP_PROTO(int irq, struct irqaction *action),
64204+ TP_PROTO(int irq, const struct irqaction *action),
64205
64206 TP_ARGS(irq, action),
64207
bc901d79 64208@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
ae4e228f
MT
64209 */
64210 TRACE_EVENT(irq_handler_exit,
64211
64212- TP_PROTO(int irq, struct irqaction *action, int ret),
64213+ TP_PROTO(int irq, const struct irqaction *action, int ret),
64214
64215 TP_ARGS(irq, action, ret),
64216
fe2de317
MT
64217diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64218index 69d485a..dd0bee7 100644
64219--- a/include/video/udlfb.h
64220+++ b/include/video/udlfb.h
8308f9c9
MT
64221@@ -51,10 +51,10 @@ struct dlfb_data {
64222 int base8;
64223 u32 pseudo_palette[256];
64224 /* blit-only rendering path metrics, exposed through sysfs */
64225- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64226- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64227- atomic_t bytes_sent; /* to usb, after compression including overhead */
64228- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64229+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64230+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64231+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64232+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64233 };
64234
64235 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
fe2de317
MT
64236diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64237index 0993a22..32ba2fe 100644
64238--- a/include/video/uvesafb.h
64239+++ b/include/video/uvesafb.h
58c5fc13
MT
64240@@ -177,6 +177,7 @@ struct uvesafb_par {
64241 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64242 u8 pmi_setpal; /* PMI for palette changes */
64243 u16 *pmi_base; /* protected mode interface location */
64244+ u8 *pmi_code; /* protected mode code location */
64245 void *pmi_start;
64246 void *pmi_pal;
64247 u8 *vbe_state_orig; /*
fe2de317
MT
64248diff --git a/init/Kconfig b/init/Kconfig
64249index d627783..693a9f3 100644
64250--- a/init/Kconfig
64251+++ b/init/Kconfig
64252@@ -1202,7 +1202,7 @@ config SLUB_DEBUG
64253
64254 config COMPAT_BRK
64255 bool "Disable heap randomization"
64256- default y
64257+ default n
64258 help
64259 Randomizing heap placement makes heap exploits harder, but it
64260 also breaks ancient binaries (including anything libc5 based).
64261diff --git a/init/do_mounts.c b/init/do_mounts.c
64262index c0851a8..4f8977d 100644
64263--- a/init/do_mounts.c
64264+++ b/init/do_mounts.c
64265@@ -287,11 +287,11 @@ static void __init get_fs_names(char *page)
58c5fc13
MT
64266
64267 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64268 {
64269- int err = sys_mount(name, "/root", fs, flags, data);
6e9df6a3 64270+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
58c5fc13
MT
64271 if (err)
64272 return err;
64273
6e9df6a3
MT
64274- sys_chdir((const char __user __force *)"/root");
64275+ sys_chdir((const char __force_user*)"/root");
64276 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
64277 printk(KERN_INFO
64278 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
fe2de317 64279@@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...)
58c5fc13
MT
64280 va_start(args, fmt);
64281 vsprintf(buf, fmt, args);
64282 va_end(args);
64283- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64284+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64285 if (fd >= 0) {
64286 sys_ioctl(fd, FDEJECT, 0);
64287 sys_close(fd);
64288 }
64289 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64290- fd = sys_open("/dev/console", O_RDWR, 0);
df50ba0c 64291+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
58c5fc13
MT
64292 if (fd >= 0) {
64293 sys_ioctl(fd, TCGETS, (long)&termios);
64294 termios.c_lflag &= ~ICANON;
64295 sys_ioctl(fd, TCSETSF, (long)&termios);
64296- sys_read(fd, &c, 1);
64297+ sys_read(fd, (char __user *)&c, 1);
64298 termios.c_lflag |= ICANON;
64299 sys_ioctl(fd, TCSETSF, (long)&termios);
64300 sys_close(fd);
66a7e928 64301@@ -488,6 +488,6 @@ void __init prepare_namespace(void)
58c5fc13
MT
64302 mount_root();
64303 out:
ae4e228f 64304 devtmpfs_mount("dev");
58c5fc13 64305- sys_mount(".", "/", NULL, MS_MOVE, NULL);
6e9df6a3
MT
64306- sys_chroot((const char __user __force *)".");
64307+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64308+ sys_chroot((const char __force_user *)".");
58c5fc13 64309 }
fe2de317
MT
64310diff --git a/init/do_mounts.h b/init/do_mounts.h
64311index f5b978a..69dbfe8 100644
64312--- a/init/do_mounts.h
64313+++ b/init/do_mounts.h
58c5fc13
MT
64314@@ -15,15 +15,15 @@ extern int root_mountflags;
64315
64316 static inline int create_dev(char *name, dev_t dev)
64317 {
64318- sys_unlink(name);
64319- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
6e9df6a3
MT
64320+ sys_unlink((char __force_user *)name);
64321+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
58c5fc13
MT
64322 }
64323
64324 #if BITS_PER_LONG == 32
64325 static inline u32 bstat(char *name)
64326 {
64327 struct stat64 stat;
64328- if (sys_stat64(name, &stat) != 0)
6e9df6a3
MT
64329+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64330 return 0;
64331 if (!S_ISBLK(stat.st_mode))
64332 return 0;
64333@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64334 static inline u32 bstat(char *name)
64335 {
64336 struct stat stat;
64337- if (sys_newstat(name, &stat) != 0)
64338+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
58c5fc13
MT
64339 return 0;
64340 if (!S_ISBLK(stat.st_mode))
64341 return 0;
fe2de317
MT
64342diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64343index 3098a38..253064e 100644
64344--- a/init/do_mounts_initrd.c
64345+++ b/init/do_mounts_initrd.c
6892158b 64346@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
58c5fc13
MT
64347 create_dev("/dev/root.old", Root_RAM0);
64348 /* mount initrd on rootfs' /root */
64349 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64350- sys_mkdir("/old", 0700);
64351- root_fd = sys_open("/", 0, 0);
64352- old_fd = sys_open("/old", 0, 0);
6e9df6a3
MT
64353+ sys_mkdir((const char __force_user *)"/old", 0700);
64354+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
64355+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
58c5fc13
MT
64356 /* move initrd over / and chdir/chroot in initrd root */
64357- sys_chdir("/root");
64358- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64359- sys_chroot(".");
6e9df6a3
MT
64360+ sys_chdir((const char __force_user *)"/root");
64361+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64362+ sys_chroot((const char __force_user *)".");
58c5fc13
MT
64363
64364 /*
64365 * In case that a resume from disk is carried out by linuxrc or one of
6892158b 64366@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
58c5fc13
MT
64367
64368 /* move initrd to rootfs' /old */
64369 sys_fchdir(old_fd);
64370- sys_mount("/", ".", NULL, MS_MOVE, NULL);
6e9df6a3 64371+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
58c5fc13
MT
64372 /* switch root and cwd back to / of rootfs */
64373 sys_fchdir(root_fd);
64374- sys_chroot(".");
6e9df6a3 64375+ sys_chroot((const char __force_user *)".");
58c5fc13
MT
64376 sys_close(old_fd);
64377 sys_close(root_fd);
64378
64379 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64380- sys_chdir("/old");
6e9df6a3 64381+ sys_chdir((const char __force_user *)"/old");
58c5fc13
MT
64382 return;
64383 }
64384
6892158b 64385@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
58c5fc13
MT
64386 mount_root();
64387
64388 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64389- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
6e9df6a3 64390+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
58c5fc13
MT
64391 if (!error)
64392 printk("okay\n");
64393 else {
64394- int fd = sys_open("/dev/root.old", O_RDWR, 0);
6e9df6a3 64395+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
58c5fc13
MT
64396 if (error == -ENOENT)
64397 printk("/initrd does not exist. Ignored.\n");
64398 else
64399 printk("failed\n");
64400 printk(KERN_NOTICE "Unmounting old root\n");
64401- sys_umount("/old", MNT_DETACH);
6e9df6a3 64402+ sys_umount((char __force_user *)"/old", MNT_DETACH);
58c5fc13
MT
64403 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64404 if (fd < 0) {
64405 error = fd;
6892158b 64406@@ -116,11 +116,11 @@ int __init initrd_load(void)
58c5fc13
MT
64407 * mounted in the normal path.
64408 */
64409 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64410- sys_unlink("/initrd.image");
6e9df6a3 64411+ sys_unlink((const char __force_user *)"/initrd.image");
58c5fc13
MT
64412 handle_initrd();
64413 return 1;
64414 }
64415 }
64416- sys_unlink("/initrd.image");
6e9df6a3 64417+ sys_unlink((const char __force_user *)"/initrd.image");
58c5fc13
MT
64418 return 0;
64419 }
fe2de317
MT
64420diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64421index 32c4799..c27ee74 100644
64422--- a/init/do_mounts_md.c
64423+++ b/init/do_mounts_md.c
58c5fc13
MT
64424@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64425 partitioned ? "_d" : "", minor,
64426 md_setup_args[ent].device_names);
64427
64428- fd = sys_open(name, 0, 0);
6e9df6a3 64429+ fd = sys_open((char __force_user *)name, 0, 0);
58c5fc13
MT
64430 if (fd < 0) {
64431 printk(KERN_ERR "md: open failed - cannot start "
64432 "array %s\n", name);
64433@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64434 * array without it
64435 */
64436 sys_close(fd);
64437- fd = sys_open(name, 0, 0);
6e9df6a3 64438+ fd = sys_open((char __force_user *)name, 0, 0);
58c5fc13
MT
64439 sys_ioctl(fd, BLKRRPART, 0);
64440 }
64441 sys_close(fd);
6e9df6a3
MT
64442@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64443
64444 wait_for_device_probe();
64445
64446- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64447+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64448 if (fd >= 0) {
64449 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64450 sys_close(fd);
fe2de317
MT
64451diff --git a/init/initramfs.c b/init/initramfs.c
64452index 2531811..040d4d4 100644
64453--- a/init/initramfs.c
64454+++ b/init/initramfs.c
ae4e228f
MT
64455@@ -74,7 +74,7 @@ static void __init free_hash(void)
64456 }
64457 }
64458
64459-static long __init do_utime(char __user *filename, time_t mtime)
64460+static long __init do_utime(__force char __user *filename, time_t mtime)
64461 {
64462 struct timespec t[2];
64463
64464@@ -109,7 +109,7 @@ static void __init dir_utime(void)
64465 struct dir_entry *de, *tmp;
64466 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64467 list_del(&de->list);
64468- do_utime(de->name, de->mtime);
6e9df6a3 64469+ do_utime((char __force_user *)de->name, de->mtime);
ae4e228f
MT
64470 kfree(de->name);
64471 kfree(de);
64472 }
58c5fc13
MT
64473@@ -271,7 +271,7 @@ static int __init maybe_link(void)
64474 if (nlink >= 2) {
64475 char *old = find_link(major, minor, ino, mode, collected);
64476 if (old)
64477- return (sys_link(old, collected) < 0) ? -1 : 1;
6e9df6a3 64478+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
58c5fc13
MT
64479 }
64480 return 0;
64481 }
fe2de317 64482@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
58c5fc13
MT
64483 {
64484 struct stat st;
64485
64486- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
6e9df6a3 64487+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
58c5fc13
MT
64488 if (S_ISDIR(st.st_mode))
64489- sys_rmdir(path);
6e9df6a3 64490+ sys_rmdir((char __force_user *)path);
58c5fc13
MT
64491 else
64492- sys_unlink(path);
6e9df6a3 64493+ sys_unlink((char __force_user *)path);
58c5fc13
MT
64494 }
64495 }
64496
64497@@ -305,7 +305,7 @@ static int __init do_name(void)
64498 int openflags = O_WRONLY|O_CREAT;
64499 if (ml != 1)
64500 openflags |= O_TRUNC;
64501- wfd = sys_open(collected, openflags, mode);
6e9df6a3 64502+ wfd = sys_open((char __force_user *)collected, openflags, mode);
58c5fc13
MT
64503
64504 if (wfd >= 0) {
64505 sys_fchown(wfd, uid, gid);
ae4e228f 64506@@ -317,17 +317,17 @@ static int __init do_name(void)
58c5fc13
MT
64507 }
64508 }
64509 } else if (S_ISDIR(mode)) {
64510- sys_mkdir(collected, mode);
64511- sys_chown(collected, uid, gid);
64512- sys_chmod(collected, mode);
6e9df6a3
MT
64513+ sys_mkdir((char __force_user *)collected, mode);
64514+ sys_chown((char __force_user *)collected, uid, gid);
64515+ sys_chmod((char __force_user *)collected, mode);
58c5fc13
MT
64516 dir_add(collected, mtime);
64517 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64518 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64519 if (maybe_link() == 0) {
64520- sys_mknod(collected, mode, rdev);
64521- sys_chown(collected, uid, gid);
64522- sys_chmod(collected, mode);
ae4e228f 64523- do_utime(collected, mtime);
6e9df6a3
MT
64524+ sys_mknod((char __force_user *)collected, mode, rdev);
64525+ sys_chown((char __force_user *)collected, uid, gid);
64526+ sys_chmod((char __force_user *)collected, mode);
64527+ do_utime((char __force_user *)collected, mtime);
58c5fc13
MT
64528 }
64529 }
ae4e228f
MT
64530 return 0;
64531@@ -336,15 +336,15 @@ static int __init do_name(void)
58c5fc13
MT
64532 static int __init do_copy(void)
64533 {
64534 if (count >= body_len) {
64535- sys_write(wfd, victim, body_len);
6e9df6a3 64536+ sys_write(wfd, (char __force_user *)victim, body_len);
58c5fc13 64537 sys_close(wfd);
ae4e228f 64538- do_utime(vcollected, mtime);
6e9df6a3 64539+ do_utime((char __force_user *)vcollected, mtime);
58c5fc13 64540 kfree(vcollected);
ae4e228f 64541 eat(body_len);
58c5fc13
MT
64542 state = SkipIt;
64543 return 0;
64544 } else {
64545- sys_write(wfd, victim, count);
6e9df6a3 64546+ sys_write(wfd, (char __force_user *)victim, count);
58c5fc13
MT
64547 body_len -= count;
64548 eat(count);
64549 return 1;
ae4e228f 64550@@ -355,9 +355,9 @@ static int __init do_symlink(void)
58c5fc13
MT
64551 {
64552 collected[N_ALIGN(name_len) + body_len] = '\0';
64553 clean_path(collected, 0);
64554- sys_symlink(collected + N_ALIGN(name_len), collected);
64555- sys_lchown(collected, uid, gid);
ae4e228f 64556- do_utime(collected, mtime);
6e9df6a3
MT
64557+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64558+ sys_lchown((char __force_user *)collected, uid, gid);
64559+ do_utime((char __force_user *)collected, mtime);
58c5fc13
MT
64560 state = SkipIt;
64561 next_state = Reset;
ae4e228f 64562 return 0;
fe2de317
MT
64563diff --git a/init/main.c b/init/main.c
64564index 03b408d..5777f59 100644
64565--- a/init/main.c
64566+++ b/init/main.c
64567@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
58c5fc13
MT
64568 extern void tc_init(void);
64569 #endif
58c5fc13 64570
16454cff
MT
64571+extern void grsecurity_init(void);
64572+
64573 /*
64574 * Debug helper: via this flag we know that we are in 'early bootup code'
64575 * where only the boot processor is running with IRQ disabled. This means
fe2de317 64576@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
58c5fc13
MT
64577
64578 __setup("reset_devices", set_reset_devices);
64579
df50ba0c 64580+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
bc901d79
MT
64581+extern char pax_enter_kernel_user[];
64582+extern char pax_exit_kernel_user[];
df50ba0c
MT
64583+extern pgdval_t clone_pgd_mask;
64584+#endif
64585+
64586+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58c5fc13
MT
64587+static int __init setup_pax_nouderef(char *str)
64588+{
df50ba0c 64589+#ifdef CONFIG_X86_32
58c5fc13 64590+ unsigned int cpu;
66a7e928 64591+ struct desc_struct *gdt;
58c5fc13
MT
64592+
64593+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
66a7e928
MT
64594+ gdt = get_cpu_gdt_table(cpu);
64595+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64596+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64597+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64598+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58c5fc13 64599+ }
bc901d79 64600+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
df50ba0c 64601+#else
6892158b
MT
64602+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64603+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
df50ba0c
MT
64604+ clone_pgd_mask = ~(pgdval_t)0UL;
64605+#endif
58c5fc13
MT
64606+
64607+ return 0;
64608+}
64609+early_param("pax_nouderef", setup_pax_nouderef);
64610+#endif
64611+
64612+#ifdef CONFIG_PAX_SOFTMODE
15a11c5b 64613+int pax_softmode;
58c5fc13
MT
64614+
64615+static int __init setup_pax_softmode(char *str)
64616+{
64617+ get_option(&str, &pax_softmode);
64618+ return 1;
64619+}
64620+__setup("pax_softmode=", setup_pax_softmode);
64621+#endif
64622+
6892158b
MT
64623 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64624 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58c5fc13 64625 static const char *panic_later, *panic_param;
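setup_pax_nouderef() implements the pax_nouderef command-line switch. On 32-bit x86 it appears to restore flat 4 GB limits on the kernel and default user data segments in each CPU's GDT; on x86-64 it stubs out the UDEREF entry/exit thunks (0xc3 is the x86 RET opcode) and widens clone_pgd_mask, effectively switching the feature off at boot. It hooks in through the standard early_param() mechanism, which is parsed before the ordinary __setup handlers; the general shape is:

    /* Generic early_param sketch; names here are illustrative only. */
    static int __init setup_example(char *str)
    {
            /* str points at the text following "example" on the command line */
            return 0;
    }
    early_param("example", setup_example);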
fe2de317 64626@@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
58c5fc13
MT
64627 {
64628 int count = preempt_count();
6892158b 64629 int ret;
58c5fc13
MT
64630+ const char *msg1 = "", *msg2 = "";
64631
6892158b
MT
64632 if (initcall_debug)
64633 ret = do_one_initcall_debug(fn);
fe2de317 64634@@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
6892158b 64635 sprintf(msgbuf, "error code %d ", ret);
58c5fc13
MT
64636
64637 if (preempt_count() != count) {
64638- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64639+ msg1 = " preemption imbalance";
64640 preempt_count() = count;
64641 }
64642 if (irqs_disabled()) {
64643- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64644+ msg2 = " disabled interrupts";
64645 local_irq_enable();
64646 }
64647- if (msgbuf[0]) {
64648- printk("initcall %pF returned with %s\n", fn, msgbuf);
64649+ if (msgbuf[0] || *msg1 || *msg2) {
64650+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64651 }
64652
6892158b 64653 return ret;
fe2de317 64654@@ -817,7 +863,7 @@ static int __init kernel_init(void * unused)
df50ba0c
MT
64655 do_basic_setup();
64656
64657 /* Open the /dev/console on the rootfs, this should never fail */
64658- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
6e9df6a3 64659+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
df50ba0c
MT
64660 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64661
64662 (void) sys_dup(0);
fe2de317 64663@@ -830,11 +876,13 @@ static int __init kernel_init(void * unused)
ae4e228f
MT
64664 if (!ramdisk_execute_command)
64665 ramdisk_execute_command = "/init";
64666
64667- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
6e9df6a3 64668+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
ae4e228f 64669 ramdisk_execute_command = NULL;
58c5fc13
MT
64670 prepare_namespace();
64671 }
64672
64673+ grsecurity_init();
64674+
64675 /*
64676 * Ok, we have completed the initial bootup, and
64677 * we're essentially up and running. Get rid of the
fe2de317
MT
64678diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64679index ed049ea..6442f7f 100644
64680--- a/ipc/mqueue.c
64681+++ b/ipc/mqueue.c
64682@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
6e9df6a3
MT
64683 mq_bytes = (mq_msg_tblsz +
64684 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64685
64686+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64687 spin_lock(&mq_lock);
64688 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64689 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
fe2de317
MT
64690diff --git a/ipc/msg.c b/ipc/msg.c
64691index 7385de2..a8180e0 100644
64692--- a/ipc/msg.c
64693+++ b/ipc/msg.c
64694@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
15a11c5b
MT
64695 return security_msg_queue_associate(msq, msgflg);
64696 }
64697
64698+static struct ipc_ops msg_ops = {
64699+ .getnew = newque,
64700+ .associate = msg_security,
64701+ .more_checks = NULL
64702+};
64703+
64704 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64705 {
64706 struct ipc_namespace *ns;
64707- struct ipc_ops msg_ops;
64708 struct ipc_params msg_params;
64709
64710 ns = current->nsproxy->ipc_ns;
64711
64712- msg_ops.getnew = newque;
64713- msg_ops.associate = msg_security;
64714- msg_ops.more_checks = NULL;
64715-
64716 msg_params.key = key;
64717 msg_params.flg = msgflg;
64718
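Turning msg_ops from an on-stack structure built at run time into a file-scope static with a designated initializer means the function-pointer table is now compile-time data (and a candidate for constification) rather than writable stack memory; the same transformation is applied to sem_ops and shm_ops below. Functionally the syscall behaves as before, it simply passes the shared static instead of a local copy.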
fe2de317
MT
64719diff --git a/ipc/sem.c b/ipc/sem.c
64720index c8e00f8..1135c4e 100644
64721--- a/ipc/sem.c
64722+++ b/ipc/sem.c
64723@@ -318,10 +318,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
15a11c5b
MT
64724 return 0;
64725 }
64726
64727+static struct ipc_ops sem_ops = {
64728+ .getnew = newary,
64729+ .associate = sem_security,
64730+ .more_checks = sem_more_checks
64731+};
64732+
64733 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64734 {
64735 struct ipc_namespace *ns;
64736- struct ipc_ops sem_ops;
64737 struct ipc_params sem_params;
64738
64739 ns = current->nsproxy->ipc_ns;
fe2de317 64740@@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
15a11c5b
MT
64741 if (nsems < 0 || nsems > ns->sc_semmsl)
64742 return -EINVAL;
64743
64744- sem_ops.getnew = newary;
64745- sem_ops.associate = sem_security;
64746- sem_ops.more_checks = sem_more_checks;
64747-
64748 sem_params.key = key;
64749 sem_params.flg = semflg;
64750 sem_params.u.nsems = nsems;
fe2de317 64751@@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
66a7e928
MT
64752 int nsems;
64753 struct list_head tasks;
64754
64755+ pax_track_stack();
64756+
64757 sma = sem_lock_check(ns, semid);
64758 if (IS_ERR(sma))
64759 return PTR_ERR(sma);
fe2de317 64760@@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
66a7e928
MT
64761 struct ipc_namespace *ns;
64762 struct list_head tasks;
64763
64764+ pax_track_stack();
64765+
64766 ns = current->nsproxy->ipc_ns;
64767
64768 if (nsops < 1 || semid < 0)
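The pax_track_stack() calls added at the top of functions with large local frames belong to PaX's stack-depth tracking (used by its STACKLEAK-style sanitisation): they record how deep the kernel stack has grown so the used portion can be cleared before returning to user space. The hook is assumed to be declared by this patch roughly as:

    /* Assumed declaration; a no-op unless the corresponding PaX option is enabled. */
    void pax_track_stack(void);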
fe2de317
MT
64769diff --git a/ipc/shm.c b/ipc/shm.c
64770index 02ecf2c..c8f5627 100644
64771--- a/ipc/shm.c
64772+++ b/ipc/shm.c
64773@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
58c5fc13
MT
64774 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64775 #endif
64776
64777+#ifdef CONFIG_GRKERNSEC
64778+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64779+ const time_t shm_createtime, const uid_t cuid,
64780+ const int shmid);
64781+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64782+ const time_t shm_createtime);
64783+#endif
64784+
64785 void shm_init_ns(struct ipc_namespace *ns)
64786 {
64787 ns->shm_ctlmax = SHMMAX;
fe2de317 64788@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
58c5fc13
MT
64789 shp->shm_lprid = 0;
64790 shp->shm_atim = shp->shm_dtim = 0;
64791 shp->shm_ctim = get_seconds();
64792+#ifdef CONFIG_GRKERNSEC
64793+ {
64794+ struct timespec timeval;
64795+ do_posix_clock_monotonic_gettime(&timeval);
64796+
64797+ shp->shm_createtime = timeval.tv_sec;
64798+ }
64799+#endif
64800 shp->shm_segsz = size;
64801 shp->shm_nattch = 0;
64802 shp->shm_file = file;
fe2de317 64803@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
15a11c5b
MT
64804 return 0;
64805 }
64806
64807+static struct ipc_ops shm_ops = {
64808+ .getnew = newseg,
64809+ .associate = shm_security,
64810+ .more_checks = shm_more_checks
64811+};
64812+
64813 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64814 {
64815 struct ipc_namespace *ns;
64816- struct ipc_ops shm_ops;
64817 struct ipc_params shm_params;
64818
64819 ns = current->nsproxy->ipc_ns;
64820
64821- shm_ops.getnew = newseg;
64822- shm_ops.associate = shm_security;
64823- shm_ops.more_checks = shm_more_checks;
64824-
64825 shm_params.key = key;
64826 shm_params.flg = shmflg;
64827 shm_params.u.size = size;
fe2de317 64828@@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
16454cff
MT
64829 case SHM_LOCK:
64830 case SHM_UNLOCK:
64831 {
64832- struct file *uninitialized_var(shm_file);
64833-
64834 lru_add_drain_all(); /* drain pagevecs to lru lists */
64835
64836 shp = shm_lock_check(ns, shmid);
fe2de317 64837@@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
58c5fc13
MT
64838 if (err)
64839 goto out_unlock;
64840
64841+#ifdef CONFIG_GRKERNSEC
64842+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64843+ shp->shm_perm.cuid, shmid) ||
64844+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64845+ err = -EACCES;
64846+ goto out_unlock;
64847+ }
64848+#endif
64849+
ae4e228f
MT
64850 path = shp->shm_file->f_path;
64851 path_get(&path);
58c5fc13
MT
64852 shp->shm_nattch++;
64853+#ifdef CONFIG_GRKERNSEC
64854+ shp->shm_lapid = current->pid;
64855+#endif
64856 size = i_size_read(path.dentry->d_inode);
64857 shm_unlock(shp);
64858
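These shm changes wire SysV shared memory into grsecurity's process and chroot restrictions: newseg() now stamps each segment with its creation time, do_shmat() records the last attaching PID, and the gr_handle_shmat()/gr_chroot_shmat() hooks can veto an attach with -EACCES, for example (under the chroot hardening options) attaching to a segment created outside the current chroot.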
fe2de317
MT
64859diff --git a/kernel/acct.c b/kernel/acct.c
64860index fa7eb3d..7faf116 100644
64861--- a/kernel/acct.c
64862+++ b/kernel/acct.c
64863@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
58c5fc13
MT
64864 */
64865 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64866 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64867- file->f_op->write(file, (char *)&ac,
6e9df6a3 64868+ file->f_op->write(file, (char __force_user *)&ac,
58c5fc13
MT
64869 sizeof(acct_t), &file->f_pos);
64870 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64871 set_fs(fs);
fe2de317
MT
64872diff --git a/kernel/audit.c b/kernel/audit.c
64873index 0a1355c..dca420f 100644
64874--- a/kernel/audit.c
64875+++ b/kernel/audit.c
6e9df6a3 64876@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
8308f9c9
MT
64877 3) suppressed due to audit_rate_limit
64878 4) suppressed due to audit_backlog_limit
64879 */
64880-static atomic_t audit_lost = ATOMIC_INIT(0);
64881+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64882
64883 /* The netlink socket. */
64884 static struct sock *audit_sock;
6e9df6a3 64885@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
8308f9c9
MT
64886 unsigned long now;
64887 int print;
64888
64889- atomic_inc(&audit_lost);
64890+ atomic_inc_unchecked(&audit_lost);
64891
64892 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64893
6e9df6a3 64894@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
8308f9c9
MT
64895 printk(KERN_WARNING
64896 "audit: audit_lost=%d audit_rate_limit=%d "
64897 "audit_backlog_limit=%d\n",
64898- atomic_read(&audit_lost),
64899+ atomic_read_unchecked(&audit_lost),
64900 audit_rate_limit,
64901 audit_backlog_limit);
64902 audit_panic(message);
fe2de317 64903@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
8308f9c9
MT
64904 status_set.pid = audit_pid;
64905 status_set.rate_limit = audit_rate_limit;
64906 status_set.backlog_limit = audit_backlog_limit;
64907- status_set.lost = atomic_read(&audit_lost);
64908+ status_set.lost = atomic_read_unchecked(&audit_lost);
64909 status_set.backlog = skb_queue_len(&audit_skb_queue);
64910 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64911 &status_set, sizeof(status_set));
fe2de317
MT
64912diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64913index ce4b054..8139ed7 100644
64914--- a/kernel/auditsc.c
64915+++ b/kernel/auditsc.c
64916@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
8308f9c9
MT
64917 }
64918
64919 /* global counter which is incremented every time something logs in */
64920-static atomic_t session_id = ATOMIC_INIT(0);
64921+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64922
64923 /**
64924 * audit_set_loginuid - set a task's audit_context loginuid
fe2de317 64925@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
8308f9c9
MT
64926 */
64927 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
64928 {
64929- unsigned int sessionid = atomic_inc_return(&session_id);
64930+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
64931 struct audit_context *context = task->audit_context;
64932
64933 if (context && context->in_syscall) {
fe2de317
MT
64934diff --git a/kernel/capability.c b/kernel/capability.c
64935index 283c529..36ac81e 100644
64936--- a/kernel/capability.c
64937+++ b/kernel/capability.c
64938@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
ae4e228f
MT
64939 * before modification is attempted and the application
64940 * fails.
64941 */
64942+ if (tocopy > ARRAY_SIZE(kdata))
64943+ return -EFAULT;
64944+
64945 if (copy_to_user(dataptr, kdata, tocopy
64946 * sizeof(struct __user_cap_data_struct))) {
64947 return -EFAULT;
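The added bounds check in capget() is defensive: tocopy is derived from the header version supplied by user space, while kdata is a fixed-size on-stack array, so rejecting any tocopy larger than ARRAY_SIZE(kdata) keeps the following copy_to_user() from walking past the array and leaking adjacent kernel stack.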
fe2de317 64948@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
58c5fc13
MT
64949 BUG();
64950 }
64951
66a7e928
MT
64952- if (security_capable(ns, current_cred(), cap) == 0) {
64953+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
ae4e228f 64954 current->flags |= PF_SUPERPRIV;
66a7e928 64955 return true;
ae4e228f 64956 }
fe2de317 64957@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
ae4e228f 64958 }
66a7e928
MT
64959 EXPORT_SYMBOL(ns_capable);
64960
64961+bool ns_capable_nolog(struct user_namespace *ns, int cap)
bc901d79
MT
64962+{
64963+ if (unlikely(!cap_valid(cap))) {
64964+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64965+ BUG();
64966+ }
64967+
66a7e928 64968+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
bc901d79 64969+ current->flags |= PF_SUPERPRIV;
66a7e928 64970+ return true;
bc901d79 64971+ }
66a7e928 64972+ return false;
bc901d79 64973+}
66a7e928 64974+EXPORT_SYMBOL(ns_capable_nolog);
58c5fc13 64975+
66a7e928
MT
64976+bool capable_nolog(int cap)
64977+{
64978+ return ns_capable_nolog(&init_user_ns, cap);
64979+}
58c5fc13 64980+EXPORT_SYMBOL(capable_nolog);
66a7e928
MT
64981+
64982 /**
64983 * task_ns_capable - Determine whether current task has a superior
64984 * capability targeted at a specific task's user namespace.
fe2de317 64985@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
66a7e928
MT
64986 }
64987 EXPORT_SYMBOL(task_ns_capable);
64988
64989+bool task_ns_capable_nolog(struct task_struct *t, int cap)
64990+{
64991+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
64992+}
64993+EXPORT_SYMBOL(task_ns_capable_nolog);
64994+
64995 /**
64996 * nsown_capable - Check superior capability to one's own user_ns
64997 * @cap: The capability in question
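The *_nolog helpers mirror ns_capable()/task_ns_capable() but go through gr_is_capable_nolog(), so a denied check does not emit a grsecurity audit message; they are presumably meant for internal probes where the capability is routinely absent. A hypothetical call site:

    /* Hypothetical example: quietly test for a capability. */
    if (!capable_nolog(CAP_SYS_ADMIN))
            return -EPERM;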
fe2de317
MT
64998diff --git a/kernel/cgroup.c b/kernel/cgroup.c
64999index 1d2b6ce..87bf267 100644
65000--- a/kernel/cgroup.c
65001+++ b/kernel/cgroup.c
6e9df6a3 65002@@ -595,6 +595,8 @@ static struct css_set *find_css_set(
66a7e928
MT
65003 struct hlist_head *hhead;
65004 struct cg_cgroup_link *link;
65005
65006+ pax_track_stack();
65007+
65008 /* First see if we already have a cgroup group that matches
65009 * the desired set */
65010 read_lock(&css_set_lock);
fe2de317
MT
65011diff --git a/kernel/compat.c b/kernel/compat.c
65012index e2435ee..8e82199 100644
65013--- a/kernel/compat.c
65014+++ b/kernel/compat.c
57199397
MT
65015@@ -13,6 +13,7 @@
65016
65017 #include <linux/linkage.h>
65018 #include <linux/compat.h>
65019+#include <linux/module.h>
65020 #include <linux/errno.h>
65021 #include <linux/time.h>
65022 #include <linux/signal.h>
fe2de317 65023@@ -167,7 +168,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
6e9df6a3
MT
65024 mm_segment_t oldfs;
65025 long ret;
65026
65027- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65028+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65029 oldfs = get_fs();
65030 set_fs(KERNEL_DS);
65031 ret = hrtimer_nanosleep_restart(restart);
fe2de317 65032@@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
6e9df6a3
MT
65033 oldfs = get_fs();
65034 set_fs(KERNEL_DS);
65035 ret = hrtimer_nanosleep(&tu,
65036- rmtp ? (struct timespec __user *)&rmt : NULL,
65037+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
65038 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65039 set_fs(oldfs);
65040
fe2de317 65041@@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
6e9df6a3
MT
65042 mm_segment_t old_fs = get_fs();
65043
65044 set_fs(KERNEL_DS);
65045- ret = sys_sigpending((old_sigset_t __user *) &s);
65046+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
65047 set_fs(old_fs);
65048 if (ret == 0)
65049 ret = put_user(s, set);
fe2de317 65050@@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
6e9df6a3
MT
65051 old_fs = get_fs();
65052 set_fs(KERNEL_DS);
65053 ret = sys_sigprocmask(how,
65054- set ? (old_sigset_t __user *) &s : NULL,
65055- oset ? (old_sigset_t __user *) &s : NULL);
65056+ set ? (old_sigset_t __force_user *) &s : NULL,
65057+ oset ? (old_sigset_t __force_user *) &s : NULL);
65058 set_fs(old_fs);
65059 if (ret == 0)
65060 if (oset)
fe2de317 65061@@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
6e9df6a3
MT
65062 mm_segment_t old_fs = get_fs();
65063
65064 set_fs(KERNEL_DS);
65065- ret = sys_old_getrlimit(resource, &r);
65066+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65067 set_fs(old_fs);
65068
65069 if (!ret) {
fe2de317 65070@@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
6e9df6a3
MT
65071 mm_segment_t old_fs = get_fs();
65072
65073 set_fs(KERNEL_DS);
65074- ret = sys_getrusage(who, (struct rusage __user *) &r);
65075+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65076 set_fs(old_fs);
65077
65078 if (ret)
fe2de317 65079@@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
6e9df6a3
MT
65080 set_fs (KERNEL_DS);
65081 ret = sys_wait4(pid,
65082 (stat_addr ?
65083- (unsigned int __user *) &status : NULL),
65084- options, (struct rusage __user *) &r);
65085+ (unsigned int __force_user *) &status : NULL),
65086+ options, (struct rusage __force_user *) &r);
65087 set_fs (old_fs);
65088
65089 if (ret > 0) {
fe2de317 65090@@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
6e9df6a3
MT
65091 memset(&info, 0, sizeof(info));
65092
65093 set_fs(KERNEL_DS);
65094- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65095- uru ? (struct rusage __user *)&ru : NULL);
65096+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65097+ uru ? (struct rusage __force_user *)&ru : NULL);
65098 set_fs(old_fs);
65099
65100 if ((ret < 0) || (info.si_signo == 0))
fe2de317 65101@@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
6e9df6a3
MT
65102 oldfs = get_fs();
65103 set_fs(KERNEL_DS);
65104 err = sys_timer_settime(timer_id, flags,
65105- (struct itimerspec __user *) &newts,
65106- (struct itimerspec __user *) &oldts);
65107+ (struct itimerspec __force_user *) &newts,
65108+ (struct itimerspec __force_user *) &oldts);
65109 set_fs(oldfs);
65110 if (!err && old && put_compat_itimerspec(old, &oldts))
65111 return -EFAULT;
fe2de317 65112@@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
6e9df6a3
MT
65113 oldfs = get_fs();
65114 set_fs(KERNEL_DS);
65115 err = sys_timer_gettime(timer_id,
65116- (struct itimerspec __user *) &ts);
65117+ (struct itimerspec __force_user *) &ts);
65118 set_fs(oldfs);
65119 if (!err && put_compat_itimerspec(setting, &ts))
65120 return -EFAULT;
fe2de317 65121@@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
6e9df6a3
MT
65122 oldfs = get_fs();
65123 set_fs(KERNEL_DS);
65124 err = sys_clock_settime(which_clock,
65125- (struct timespec __user *) &ts);
65126+ (struct timespec __force_user *) &ts);
65127 set_fs(oldfs);
65128 return err;
65129 }
fe2de317 65130@@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
6e9df6a3
MT
65131 oldfs = get_fs();
65132 set_fs(KERNEL_DS);
65133 err = sys_clock_gettime(which_clock,
65134- (struct timespec __user *) &ts);
65135+ (struct timespec __force_user *) &ts);
65136 set_fs(oldfs);
65137 if (!err && put_compat_timespec(&ts, tp))
65138 return -EFAULT;
fe2de317 65139@@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
6e9df6a3
MT
65140
65141 oldfs = get_fs();
65142 set_fs(KERNEL_DS);
65143- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65144+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65145 set_fs(oldfs);
65146
65147 err = compat_put_timex(utp, &txc);
fe2de317 65148@@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
6e9df6a3
MT
65149 oldfs = get_fs();
65150 set_fs(KERNEL_DS);
65151 err = sys_clock_getres(which_clock,
65152- (struct timespec __user *) &ts);
65153+ (struct timespec __force_user *) &ts);
65154 set_fs(oldfs);
65155 if (!err && tp && put_compat_timespec(&ts, tp))
65156 return -EFAULT;
fe2de317 65157@@ -729,9 +730,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
6e9df6a3
MT
65158 long err;
65159 mm_segment_t oldfs;
65160 struct timespec tu;
65161- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65162+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65163
65164- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65165+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65166 oldfs = get_fs();
65167 set_fs(KERNEL_DS);
65168 err = clock_nanosleep_restart(restart);
fe2de317 65169@@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
6e9df6a3
MT
65170 oldfs = get_fs();
65171 set_fs(KERNEL_DS);
65172 err = sys_clock_nanosleep(which_clock, flags,
65173- (struct timespec __user *) &in,
65174- (struct timespec __user *) &out);
65175+ (struct timespec __force_user *) &in,
65176+ (struct timespec __force_user *) &out);
65177 set_fs(oldfs);
65178
65179 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
fe2de317
MT
65180diff --git a/kernel/configs.c b/kernel/configs.c
65181index 42e8fa0..9e7406b 100644
65182--- a/kernel/configs.c
65183+++ b/kernel/configs.c
bc901d79 65184@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
58c5fc13
MT
65185 struct proc_dir_entry *entry;
65186
65187 /* create the current config file */
65188+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65189+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65190+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65191+ &ikconfig_file_ops);
65192+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65193+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65194+ &ikconfig_file_ops);
65195+#endif
65196+#else
65197 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65198 &ikconfig_file_ops);
65199+#endif
65200+
65201 if (!entry)
65202 return -ENOMEM;
65203
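With the grsecurity /proc restrictions enabled, /proc/config.gz stops being world-readable: under GRKERNSEC_PROC_USER (or HIDESYM) it is created 0400, under GRKERNSEC_PROC_USERGROUP it is 0440 so the configured group can still read it, and only an unrestricted build keeps the original 0444 mode.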
fe2de317
MT
65204diff --git a/kernel/cred.c b/kernel/cred.c
65205index 8ef31f5..f63d997 100644
65206--- a/kernel/cred.c
65207+++ b/kernel/cred.c
65208@@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
66a7e928
MT
65209 */
65210 void __put_cred(struct cred *cred)
65211 {
65212+ pax_track_stack();
65213+
65214 kdebug("__put_cred(%p{%d,%d})", cred,
65215 atomic_read(&cred->usage),
65216 read_cred_subscribers(cred));
65217@@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
65218 {
65219 struct cred *cred;
65220
65221+ pax_track_stack();
65222+
65223 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
65224 atomic_read(&tsk->cred->usage),
65225 read_cred_subscribers(tsk->cred));
fe2de317 65226@@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct task_struct *task)
66a7e928
MT
65227 {
65228 const struct cred *cred;
65229
65230+ pax_track_stack();
65231+
65232 rcu_read_lock();
65233
65234 do {
65235@@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
65236 {
65237 struct cred *new;
65238
65239+ pax_track_stack();
65240+
65241 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
65242 if (!new)
65243 return NULL;
65244@@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
65245 const struct cred *old;
65246 struct cred *new;
65247
65248+ pax_track_stack();
65249+
65250 validate_process_creds();
65251
65252 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
65253@@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
65254 struct thread_group_cred *tgcred = NULL;
65255 struct cred *new;
65256
65257+ pax_track_stack();
65258+
65259 #ifdef CONFIG_KEYS
65260 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
65261 if (!tgcred)
fe2de317 65262@@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
66a7e928
MT
65263 struct cred *new;
65264 int ret;
65265
65266+ pax_track_stack();
65267+
65268 if (
65269 #ifdef CONFIG_KEYS
65270 !p->cred->thread_keyring &&
65271@@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
65272 struct task_struct *task = current;
65273 const struct cred *old = task->real_cred;
65274
65275+ pax_track_stack();
65276+
65277 kdebug("commit_creds(%p{%d,%d})", new,
65278 atomic_read(&new->usage),
65279 read_cred_subscribers(new));
65280@@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
58c5fc13
MT
65281
65282 get_cred(new); /* we will require a ref for the subj creds too */
65283
65284+ gr_set_role_label(task, new->uid, new->gid);
65285+
65286 /* dumpability changes */
65287 if (old->euid != new->euid ||
65288 old->egid != new->egid ||
6e9df6a3 65289@@ -549,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
66a7e928
MT
65290 */
65291 void abort_creds(struct cred *new)
65292 {
65293+ pax_track_stack();
65294+
65295 kdebug("abort_creds(%p{%d,%d})", new,
65296 atomic_read(&new->usage),
65297 read_cred_subscribers(new));
fe2de317 65298@@ -572,6 +592,8 @@ const struct cred *override_creds(const struct cred *new)
66a7e928
MT
65299 {
65300 const struct cred *old = current->cred;
65301
65302+ pax_track_stack();
65303+
65304 kdebug("override_creds(%p{%d,%d})", new,
65305 atomic_read(&new->usage),
65306 read_cred_subscribers(new));
fe2de317 65307@@ -601,6 +623,8 @@ void revert_creds(const struct cred *old)
66a7e928
MT
65308 {
65309 const struct cred *override = current->cred;
65310
65311+ pax_track_stack();
65312+
65313 kdebug("revert_creds(%p{%d,%d})", old,
65314 atomic_read(&old->usage),
65315 read_cred_subscribers(old));
fe2de317 65316@@ -647,6 +671,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
66a7e928
MT
65317 const struct cred *old;
65318 struct cred *new;
65319
65320+ pax_track_stack();
65321+
65322 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
65323 if (!new)
65324 return NULL;
6e9df6a3 65325@@ -701,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
66a7e928
MT
65326 */
65327 int set_security_override(struct cred *new, u32 secid)
65328 {
65329+ pax_track_stack();
65330+
65331 return security_kernel_act_as(new, secid);
65332 }
65333 EXPORT_SYMBOL(set_security_override);
fe2de317 65334@@ -720,6 +748,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
66a7e928
MT
65335 u32 secid;
65336 int ret;
65337
65338+ pax_track_stack();
65339+
65340 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
65341 if (ret < 0)
65342 return ret;
fe2de317
MT
65343diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65344index 0d7c087..01b8cef 100644
65345--- a/kernel/debug/debug_core.c
65346+++ b/kernel/debug/debug_core.c
65347@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
8308f9c9
MT
65348 */
65349 static atomic_t masters_in_kgdb;
65350 static atomic_t slaves_in_kgdb;
65351-static atomic_t kgdb_break_tasklet_var;
65352+static atomic_unchecked_t kgdb_break_tasklet_var;
65353 atomic_t kgdb_setting_breakpoint;
65354
65355 struct task_struct *kgdb_usethread;
65356@@ -129,7 +129,7 @@ int kgdb_single_step;
65357 static pid_t kgdb_sstep_pid;
65358
65359 /* to keep track of the CPU which is doing the single stepping*/
65360-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65361+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65362
65363 /*
65364 * If you are debugging a problem where roundup (the collection of
65365@@ -542,7 +542,7 @@ return_normal:
65366 * kernel will only try for the value of sstep_tries before
65367 * giving up and continuing on.
65368 */
65369- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65370+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65371 (kgdb_info[cpu].task &&
65372 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65373 atomic_set(&kgdb_active, -1);
65374@@ -636,8 +636,8 @@ cpu_master_loop:
65375 }
65376
65377 kgdb_restore:
65378- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65379- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65380+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65381+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65382 if (kgdb_info[sstep_cpu].task)
65383 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65384 else
fe2de317 65385@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
65386 static void kgdb_tasklet_bpt(unsigned long ing)
65387 {
65388 kgdb_breakpoint();
65389- atomic_set(&kgdb_break_tasklet_var, 0);
65390+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65391 }
65392
65393 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65394
65395 void kgdb_schedule_breakpoint(void)
65396 {
65397- if (atomic_read(&kgdb_break_tasklet_var) ||
65398+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65399 atomic_read(&kgdb_active) != -1 ||
65400 atomic_read(&kgdb_setting_breakpoint))
65401 return;
65402- atomic_inc(&kgdb_break_tasklet_var);
65403+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
65404 tasklet_schedule(&kgdb_tasklet_breakpoint);
65405 }
65406 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65407diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65408index 63786e7..0780cac 100644
65409--- a/kernel/debug/kdb/kdb_main.c
65410+++ b/kernel/debug/kdb/kdb_main.c
65411@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
65412 list_for_each_entry(mod, kdb_modules, list) {
65413
65414 kdb_printf("%-20s%8u 0x%p ", mod->name,
65415- mod->core_size, (void *)mod);
65416+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
65417 #ifdef CONFIG_MODULE_UNLOAD
65418 kdb_printf("%4d ", module_refcount(mod));
65419 #endif
fe2de317 65420@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
65421 kdb_printf(" (Loading)");
65422 else
65423 kdb_printf(" (Live)");
65424- kdb_printf(" 0x%p", mod->module_core);
65425+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65426
65427 #ifdef CONFIG_MODULE_UNLOAD
65428 {
65429diff --git a/kernel/events/core.c b/kernel/events/core.c
65430index 0f85778..0d43716 100644
65431--- a/kernel/events/core.c
65432+++ b/kernel/events/core.c
65433@@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65434 return 0;
65435 }
65436
65437-static atomic64_t perf_event_id;
65438+static atomic64_unchecked_t perf_event_id;
65439
65440 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65441 enum event_type_t event_type);
fe2de317 65442@@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info)
65443
65444 static inline u64 perf_event_count(struct perf_event *event)
65445 {
65446- return local64_read(&event->count) + atomic64_read(&event->child_count);
65447+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65448 }
65449
65450 static u64 perf_event_read(struct perf_event *event)
fe2de317 65451@@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65452 mutex_lock(&event->child_mutex);
65453 total += perf_event_read(event);
65454 *enabled += event->total_time_enabled +
65455- atomic64_read(&event->child_total_time_enabled);
65456+ atomic64_read_unchecked(&event->child_total_time_enabled);
65457 *running += event->total_time_running +
65458- atomic64_read(&event->child_total_time_running);
65459+ atomic64_read_unchecked(&event->child_total_time_running);
65460
65461 list_for_each_entry(child, &event->child_list, child_list) {
65462 total += perf_event_read(child);
fe2de317 65463@@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct perf_event *event)
65464 userpg->offset -= local64_read(&event->hw.prev_count);
65465
6e9df6a3 65466 userpg->time_enabled = enabled +
65467- atomic64_read(&event->child_total_time_enabled);
65468+ atomic64_read_unchecked(&event->child_total_time_enabled);
65469
6e9df6a3 65470 userpg->time_running = running +
65471- atomic64_read(&event->child_total_time_running);
65472+ atomic64_read_unchecked(&event->child_total_time_running);
65473
65474 barrier();
65475 ++userpg->lock;
fe2de317 65476@@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65477 values[n++] = perf_event_count(event);
65478 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65479 values[n++] = enabled +
65480- atomic64_read(&event->child_total_time_enabled);
65481+ atomic64_read_unchecked(&event->child_total_time_enabled);
65482 }
65483 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65484 values[n++] = running +
65485- atomic64_read(&event->child_total_time_running);
65486+ atomic64_read_unchecked(&event->child_total_time_running);
65487 }
65488 if (read_format & PERF_FORMAT_ID)
65489 values[n++] = primary_event_id(event);
fe2de317 65490@@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65491 * need to add enough zero bytes after the string to handle
65492 * the 64bit alignment we do later.
65493 */
65494- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65495+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
65496 if (!buf) {
65497 name = strncpy(tmp, "//enomem", sizeof(tmp));
65498 goto got_name;
65499 }
65500- name = d_path(&file->f_path, buf, PATH_MAX);
65501+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65502 if (IS_ERR(name)) {
65503 name = strncpy(tmp, "//toolong", sizeof(tmp));
65504 goto got_name;
fe2de317 65505@@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65506 event->parent = parent_event;
65507
65508 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65509- event->id = atomic64_inc_return(&perf_event_id);
65510+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
65511
65512 event->state = PERF_EVENT_STATE_INACTIVE;
65513
fe2de317 65514@@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf_event *child_event,
65515 /*
65516 * Add back the child's count to the parent's count:
65517 */
65518- atomic64_add(child_val, &parent_event->child_count);
65519- atomic64_add(child_event->total_time_enabled,
65520+ atomic64_add_unchecked(child_val, &parent_event->child_count);
65521+ atomic64_add_unchecked(child_event->total_time_enabled,
65522 &parent_event->child_total_time_enabled);
65523- atomic64_add(child_event->total_time_running,
65524+ atomic64_add_unchecked(child_event->total_time_running,
65525 &parent_event->child_total_time_running);
65526
65527 /*
65528diff --git a/kernel/exit.c b/kernel/exit.c
65529index 2913b35..4465c81 100644
65530--- a/kernel/exit.c
65531+++ b/kernel/exit.c
bc901d79 65532@@ -57,6 +57,10 @@
57199397 65533 #include <asm/pgtable.h>
58c5fc13 65534 #include <asm/mmu_context.h>
65535
65536+#ifdef CONFIG_GRKERNSEC
65537+extern rwlock_t grsec_exec_file_lock;
65538+#endif
65539+
65540 static void exit_mm(struct task_struct * tsk);
65541
57199397 65542 static void __unhash_process(struct task_struct *p, bool group_dead)
fe2de317 65543@@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
65544 struct task_struct *leader;
65545 int zap_leader;
65546 repeat:
15a11c5b 65547+#ifdef CONFIG_NET
58c5fc13 65548+ gr_del_task_from_ip_table(p);
15a11c5b 65549+#endif
58c5fc13 65550+
58c5fc13 65551 /* don't need to get the RCU readlock here - the process is dead and
df50ba0c 65552 * can't be modifying its own credentials. But shut RCU-lockdep up */
6e9df6a3 65553 rcu_read_lock();
fe2de317 65554@@ -380,7 +388,7 @@ int allow_signal(int sig)
65555 * know it'll be handled, so that they don't get converted to
65556 * SIGKILL or just silently dropped.
65557 */
65558- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65559+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65560 recalc_sigpending();
65561 spin_unlock_irq(&current->sighand->siglock);
65562 return 0;
fe2de317 65563@@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
65564 vsnprintf(current->comm, sizeof(current->comm), name, args);
65565 va_end(args);
65566
65567+#ifdef CONFIG_GRKERNSEC
65568+ write_lock(&grsec_exec_file_lock);
65569+ if (current->exec_file) {
65570+ fput(current->exec_file);
65571+ current->exec_file = NULL;
65572+ }
65573+ write_unlock(&grsec_exec_file_lock);
65574+#endif
65575+
65576+ gr_set_kernel_label(current);
65577+
65578 /*
65579 * If we were started as result of loading a module, close all of the
65580 * user space pages. We don't need them, and if we didn't close them
fe2de317 65581@@ -895,6 +914,8 @@ NORET_TYPE void do_exit(long code)
65582 struct task_struct *tsk = current;
65583 int group_dead;
65584
65585+ set_fs(USER_DS);
65586+
65587 profile_task_exit(tsk);
bc901d79 65588
6e9df6a3 65589 WARN_ON(blk_needs_flush_plug(tsk));
fe2de317 65590@@ -911,7 +932,6 @@ NORET_TYPE void do_exit(long code)
65591 * mm_release()->clear_child_tid() from writing to a user-controlled
65592 * kernel address.
bc901d79 65593 */
6e9df6a3 65594- set_fs(USER_DS);
bc901d79 65595
6e9df6a3 65596 ptrace_event(PTRACE_EVENT_EXIT, code);
bc901d79 65597
fe2de317 65598@@ -973,6 +993,9 @@ NORET_TYPE void do_exit(long code)
65599 tsk->exit_code = code;
65600 taskstats_exit(tsk, group_dead);
65601
65602+ gr_acl_handle_psacct(tsk, code);
65603+ gr_acl_handle_exit();
65604+
65605 exit_mm(tsk);
65606
65607 if (group_dead)
65608diff --git a/kernel/fork.c b/kernel/fork.c
65609index 8e6b6f4..9dccf00 100644
65610--- a/kernel/fork.c
65611+++ b/kernel/fork.c
65612@@ -285,7 +285,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65613 *stackend = STACK_END_MAGIC; /* for overflow detection */
65614
65615 #ifdef CONFIG_CC_STACKPROTECTOR
65616- tsk->stack_canary = get_random_int();
65617+ tsk->stack_canary = pax_get_random_long();
65618 #endif
65619
65620 /*
65621@@ -309,13 +309,77 @@ out:
65622 }
65623
65624 #ifdef CONFIG_MMU
65625+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
65626+{
65627+ struct vm_area_struct *tmp;
65628+ unsigned long charge;
65629+ struct mempolicy *pol;
65630+ struct file *file;
65631+
65632+ charge = 0;
65633+ if (mpnt->vm_flags & VM_ACCOUNT) {
65634+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65635+ if (security_vm_enough_memory(len))
65636+ goto fail_nomem;
65637+ charge = len;
65638+ }
65639+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65640+ if (!tmp)
65641+ goto fail_nomem;
65642+ *tmp = *mpnt;
65643+ tmp->vm_mm = mm;
65644+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
65645+ pol = mpol_dup(vma_policy(mpnt));
65646+ if (IS_ERR(pol))
65647+ goto fail_nomem_policy;
65648+ vma_set_policy(tmp, pol);
65649+ if (anon_vma_fork(tmp, mpnt))
65650+ goto fail_nomem_anon_vma_fork;
65651+ tmp->vm_flags &= ~VM_LOCKED;
6892158b 65652+ tmp->vm_next = tmp->vm_prev = NULL;
65653+ tmp->vm_mirror = NULL;
65654+ file = tmp->vm_file;
65655+ if (file) {
65656+ struct inode *inode = file->f_path.dentry->d_inode;
65657+ struct address_space *mapping = file->f_mapping;
65658+
65659+ get_file(file);
65660+ if (tmp->vm_flags & VM_DENYWRITE)
65661+ atomic_dec(&inode->i_writecount);
15a11c5b 65662+ mutex_lock(&mapping->i_mmap_mutex);
65663+ if (tmp->vm_flags & VM_SHARED)
65664+ mapping->i_mmap_writable++;
65665+ flush_dcache_mmap_lock(mapping);
65666+ /* insert tmp into the share list, just after mpnt */
65667+ vma_prio_tree_add(tmp, mpnt);
65668+ flush_dcache_mmap_unlock(mapping);
15a11c5b 65669+ mutex_unlock(&mapping->i_mmap_mutex);
65670+ }
65671+
65672+ /*
65673+ * Clear hugetlb-related page reserves for children. This only
65674+ * affects MAP_PRIVATE mappings. Faults generated by the child
65675+ * are not guaranteed to succeed, even if read-only
65676+ */
65677+ if (is_vm_hugetlb_page(tmp))
65678+ reset_vma_resv_huge_pages(tmp);
65679+
65680+ return tmp;
65681+
65682+fail_nomem_anon_vma_fork:
65683+ mpol_put(pol);
65684+fail_nomem_policy:
65685+ kmem_cache_free(vm_area_cachep, tmp);
65686+fail_nomem:
65687+ vm_unacct_memory(charge);
65688+ return NULL;
65689+}
65690+
65691 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65692 {
65693 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65694 struct rb_node **rb_link, *rb_parent;
65695 int retval;
65696- unsigned long charge;
65697- struct mempolicy *pol;
65698
65699 down_write(&oldmm->mmap_sem);
65700 flush_cache_dup_mm(oldmm);
fe2de317 65701@@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65702 mm->locked_vm = 0;
65703 mm->mmap = NULL;
65704 mm->mmap_cache = NULL;
65705- mm->free_area_cache = oldmm->mmap_base;
65706- mm->cached_hole_size = ~0UL;
65707+ mm->free_area_cache = oldmm->free_area_cache;
65708+ mm->cached_hole_size = oldmm->cached_hole_size;
65709 mm->map_count = 0;
65710 cpumask_clear(mm_cpumask(mm));
65711 mm->mm_rb = RB_ROOT;
fe2de317 65712@@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65713
65714 prev = NULL;
65715 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65716- struct file *file;
65717-
65718 if (mpnt->vm_flags & VM_DONTCOPY) {
65719 long pages = vma_pages(mpnt);
65720 mm->total_vm -= pages;
fe2de317 65721@@ -353,53 +415,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65722 -pages);
65723 continue;
65724 }
65725- charge = 0;
65726- if (mpnt->vm_flags & VM_ACCOUNT) {
65727- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65728- if (security_vm_enough_memory(len))
65729- goto fail_nomem;
65730- charge = len;
65731+ tmp = dup_vma(mm, mpnt);
65732+ if (!tmp) {
65733+ retval = -ENOMEM;
65734+ goto out;
65735 }
65736- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65737- if (!tmp)
65738- goto fail_nomem;
65739- *tmp = *mpnt;
65740- INIT_LIST_HEAD(&tmp->anon_vma_chain);
65741- pol = mpol_dup(vma_policy(mpnt));
65742- retval = PTR_ERR(pol);
65743- if (IS_ERR(pol))
65744- goto fail_nomem_policy;
65745- vma_set_policy(tmp, pol);
6892158b 65746- tmp->vm_mm = mm;
65747- if (anon_vma_fork(tmp, mpnt))
65748- goto fail_nomem_anon_vma_fork;
65749- tmp->vm_flags &= ~VM_LOCKED;
65750- tmp->vm_next = tmp->vm_prev = NULL;
65751- file = tmp->vm_file;
65752- if (file) {
65753- struct inode *inode = file->f_path.dentry->d_inode;
65754- struct address_space *mapping = file->f_mapping;
65755-
65756- get_file(file);
65757- if (tmp->vm_flags & VM_DENYWRITE)
65758- atomic_dec(&inode->i_writecount);
15a11c5b 65759- mutex_lock(&mapping->i_mmap_mutex);
65760- if (tmp->vm_flags & VM_SHARED)
65761- mapping->i_mmap_writable++;
65762- flush_dcache_mmap_lock(mapping);
65763- /* insert tmp into the share list, just after mpnt */
65764- vma_prio_tree_add(tmp, mpnt);
65765- flush_dcache_mmap_unlock(mapping);
15a11c5b 65766- mutex_unlock(&mapping->i_mmap_mutex);
65767- }
65768-
65769- /*
65770- * Clear hugetlb-related page reserves for children. This only
65771- * affects MAP_PRIVATE mappings. Faults generated by the child
65772- * are not guaranteed to succeed, even if read-only
65773- */
65774- if (is_vm_hugetlb_page(tmp))
65775- reset_vma_resv_huge_pages(tmp);
65776
65777 /*
57199397 65778 * Link in the new vma and copy the page table entries.
fe2de317 65779@@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65780 if (retval)
65781 goto out;
65782 }
65783+
65784+#ifdef CONFIG_PAX_SEGMEXEC
65785+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65786+ struct vm_area_struct *mpnt_m;
65787+
65788+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65789+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65790+
65791+ if (!mpnt->vm_mirror)
65792+ continue;
65793+
65794+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65795+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65796+ mpnt->vm_mirror = mpnt_m;
65797+ } else {
65798+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65799+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65800+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65801+ mpnt->vm_mirror->vm_mirror = mpnt;
65802+ }
65803+ }
65804+ BUG_ON(mpnt_m);
65805+ }
65806+#endif
65807+
65808 /* a new mm has just been created */
65809 arch_dup_mmap(oldmm, mm);
65810 retval = 0;
6e9df6a3 65811@@ -430,14 +475,6 @@ out:
65812 flush_tlb_mm(oldmm);
65813 up_write(&oldmm->mmap_sem);
65814 return retval;
65815-fail_nomem_anon_vma_fork:
65816- mpol_put(pol);
65817-fail_nomem_policy:
65818- kmem_cache_free(vm_area_cachep, tmp);
65819-fail_nomem:
65820- retval = -ENOMEM;
65821- vm_unacct_memory(charge);
65822- goto out;
65823 }
65824
6e9df6a3 65825 static inline int mm_alloc_pgd(struct mm_struct *mm)
fe2de317 65826@@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
6892158b 65827 spin_unlock(&fs->lock);
65828 return -EAGAIN;
65829 }
65830- fs->users++;
65831+ atomic_inc(&fs->users);
6892158b 65832 spin_unlock(&fs->lock);
65833 return 0;
65834 }
65835 tsk->fs = copy_fs_struct(fs);
65836 if (!tsk->fs)
65837 return -ENOMEM;
65838+ gr_set_chroot_entries(tsk, &tsk->fs->root);
65839 return 0;
65840 }
65841
fe2de317 65842@@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65843 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65844 #endif
65845 retval = -EAGAIN;
65846+
65847+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65848+
65849 if (atomic_read(&p->real_cred->user->processes) >=
df50ba0c 65850 task_rlimit(p, RLIMIT_NPROC)) {
6e9df6a3 65851 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
fe2de317 65852@@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65853 if (clone_flags & CLONE_THREAD)
65854 p->tgid = current->tgid;
65855
65856+ gr_copy_label(p);
65857+
65858 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65859 /*
65860 * Clear TID on mm_release()?
6e9df6a3 65861@@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
65862 bad_fork_free:
65863 free_task(p);
65864 fork_out:
65865+ gr_log_forkfail(retval);
65866+
65867 return ERR_PTR(retval);
65868 }
65869
6e9df6a3 65870@@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
65871 if (clone_flags & CLONE_PARENT_SETTID)
65872 put_user(nr, parent_tidptr);
65873
65874+ gr_handle_brute_check();
65875+
65876 if (clone_flags & CLONE_VFORK) {
65877 p->vfork_done = &vfork;
65878 init_completion(&vfork);
fe2de317 65879@@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65880 return 0;
65881
65882 /* don't need lock here; in the worst case we'll do useless copy */
65883- if (fs->users == 1)
65884+ if (atomic_read(&fs->users) == 1)
65885 return 0;
65886
65887 *new_fsp = copy_fs_struct(fs);
fe2de317 65888@@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
58c5fc13 65889 fs = current->fs;
6892158b 65890 spin_lock(&fs->lock);
65891 current->fs = new_fs;
65892- if (--fs->users)
df50ba0c 65893+ gr_set_chroot_entries(current, &current->fs->root);
65894+ if (atomic_dec_return(&fs->users))
65895 new_fs = NULL;
65896 else
65897 new_fs = fs;
65898diff --git a/kernel/futex.c b/kernel/futex.c
65899index 11cbe05..9ff191b 100644
65900--- a/kernel/futex.c
65901+++ b/kernel/futex.c
65902@@ -54,6 +54,7 @@
65903 #include <linux/mount.h>
65904 #include <linux/pagemap.h>
65905 #include <linux/syscalls.h>
65906+#include <linux/ptrace.h>
65907 #include <linux/signal.h>
65908 #include <linux/module.h>
65909 #include <linux/magic.h>
fe2de317 65910@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
16454cff 65911 struct page *page, *page_head;
15a11c5b 65912 int err, ro = 0;
65913
65914+#ifdef CONFIG_PAX_SEGMEXEC
65915+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65916+ return -EFAULT;
65917+#endif
65918+
65919 /*
65920 * The futex address must be "naturally" aligned.
65921 */
fe2de317 65922@@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
65923 struct futex_q q = futex_q_init;
65924 int ret;
65925
65926+ pax_track_stack();
65927+
65928 if (!bitset)
65929 return -EINVAL;
65930 q.bitset = bitset;
fe2de317 65931@@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
65932 struct futex_q q = futex_q_init;
65933 int res, ret;
65934
65935+ pax_track_stack();
65936+
65937 if (!bitset)
65938 return -EINVAL;
65939
fe2de317 65940@@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65941 {
65942 struct robust_list_head __user *head;
65943 unsigned long ret;
ae4e228f 65944+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57199397 65945 const struct cred *cred = current_cred(), *pcred;
65946+#endif
65947
65948 if (!futex_cmpxchg_enabled)
65949 return -ENOSYS;
fe2de317 65950@@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65951 if (!p)
65952 goto err_unlock;
65953 ret = -EPERM;
65954+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65955+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
65956+ goto err_unlock;
65957+#else
65958 pcred = __task_cred(p);
65959 /* If victim is in different user_ns, then uids are not
65960 comparable, so we must have CAP_SYS_PTRACE */
fe2de317 65961@@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
66a7e928 65962 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
ae4e228f 65963 goto err_unlock;
66a7e928 65964 ok:
65965+#endif
65966 head = p->robust_list;
65967 rcu_read_unlock();
65968 }
15a11c5b 65969@@ -2712,6 +2729,7 @@ static int __init futex_init(void)
58c5fc13 65970 {
65971 u32 curval;
65972 int i;
65973+ mm_segment_t oldfs;
58c5fc13 65974
65975 /*
65976 * This will fail and we want it. Some arch implementations do
15a11c5b 65977@@ -2723,8 +2741,11 @@ static int __init futex_init(void)
65978 * implementation, the non-functional ones will return
65979 * -ENOSYS.
65980 */
65981+ oldfs = get_fs();
65982+ set_fs(USER_DS);
66a7e928 65983 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
bc901d79 65984 futex_cmpxchg_enabled = 1;
66a7e928 65985+ set_fs(oldfs);
bc901d79 65986
66a7e928 65987 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
6e9df6a3 65988 plist_head_init(&futex_queues[i].chain);
65989diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
65990index 5f9e689..03afa21 100644
65991--- a/kernel/futex_compat.c
65992+++ b/kernel/futex_compat.c
65993@@ -10,6 +10,7 @@
65994 #include <linux/compat.h>
65995 #include <linux/nsproxy.h>
65996 #include <linux/futex.h>
65997+#include <linux/ptrace.h>
65998
65999 #include <asm/uaccess.h>
66000
fe2de317 66001@@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66002 {
66003 struct compat_robust_list_head __user *head;
66004 unsigned long ret;
66005- const struct cred *cred = current_cred(), *pcred;
ae4e228f 66006+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57199397 66007+ const struct cred *cred = current_cred();
66008+ const struct cred *pcred;
66009+#endif
66010
66011 if (!futex_cmpxchg_enabled)
66012 return -ENOSYS;
fe2de317 66013@@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66014 if (!p)
66015 goto err_unlock;
66016 ret = -EPERM;
66017+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66018+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
66019+ goto err_unlock;
66020+#else
66021 pcred = __task_cred(p);
66022 /* If victim is in different user_ns, then uids are not
66023 comparable, so we must have CAP_SYS_PTRACE */
fe2de317 66024@@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66a7e928 66025 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
ae4e228f 66026 goto err_unlock;
66a7e928 66027 ok:
66028+#endif
66029 head = p->compat_robust_list;
df50ba0c 66030 rcu_read_unlock();
ae4e228f 66031 }
66032diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66033index 9b22d03..6295b62 100644
66034--- a/kernel/gcov/base.c
66035+++ b/kernel/gcov/base.c
66036@@ -102,11 +102,6 @@ void gcov_enable_events(void)
66037 }
66038
66039 #ifdef CONFIG_MODULES
66040-static inline int within(void *addr, void *start, unsigned long size)
66041-{
66042- return ((addr >= start) && (addr < start + size));
66043-}
66044-
66045 /* Update list and generate events when modules are unloaded. */
66046 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66047 void *data)
fe2de317 66048@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66049 prev = NULL;
66050 /* Remove entries located in module from linked list. */
66051 for (info = gcov_info_head; info; info = info->next) {
66052- if (within(info, mod->module_core, mod->core_size)) {
66053+ if (within_module_core_rw((unsigned long)info, mod)) {
66054 if (prev)
66055 prev->next = info->next;
66056 else
66057diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66058index 2043c08..ec81a69 100644
66059--- a/kernel/hrtimer.c
66060+++ b/kernel/hrtimer.c
66061@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
66062 local_irq_restore(flags);
66063 }
66064
66065-static void run_hrtimer_softirq(struct softirq_action *h)
66066+static void run_hrtimer_softirq(void)
66067 {
66068 hrtimer_peek_ahead_timers();
66069 }
66070diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66071index e6f1f24..6c19597 100644
66072--- a/kernel/jump_label.c
66073+++ b/kernel/jump_label.c
66074@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66075
66076 size = (((unsigned long)stop - (unsigned long)start)
66077 / sizeof(struct jump_entry));
66078+ pax_open_kernel();
66079 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66080+ pax_close_kernel();
66081 }
66082
15a11c5b 66083 static void jump_label_update(struct jump_label_key *key, int enable);
fe2de317 66084@@ -298,10 +300,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66085 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66086 struct jump_entry *iter;
66087
bc901d79 66088+ pax_open_kernel();
66089 for (iter = iter_start; iter < iter_stop; iter++) {
66090 if (within_module_init(iter->code, mod))
66091 iter->code = 0;
66092 }
bc901d79 66093+ pax_close_kernel();
15a11c5b 66094 }
bc901d79 66095
15a11c5b 66096 static int
66097diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66098index 079f1d3..a407562 100644
66099--- a/kernel/kallsyms.c
66100+++ b/kernel/kallsyms.c
66101@@ -11,6 +11,9 @@
66102 * Changed the compression method from stem compression to "table lookup"
66103 * compression (see scripts/kallsyms.c for a more complete description)
66104 */
66105+#ifdef CONFIG_GRKERNSEC_HIDESYM
66106+#define __INCLUDED_BY_HIDESYM 1
66107+#endif
66108 #include <linux/kallsyms.h>
66109 #include <linux/module.h>
66110 #include <linux/init.h>
fe2de317 66111@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66112
66113 static inline int is_kernel_inittext(unsigned long addr)
66114 {
66115+ if (system_state != SYSTEM_BOOTING)
66116+ return 0;
66117+
66118 if (addr >= (unsigned long)_sinittext
66119 && addr <= (unsigned long)_einittext)
66120 return 1;
66121 return 0;
66122 }
58c5fc13 66123
ae4e228f 66124+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
df50ba0c 66125+#ifdef CONFIG_MODULES
66126+static inline int is_module_text(unsigned long addr)
66127+{
66128+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66129+ return 1;
66130+
66131+ addr = ktla_ktva(addr);
66132+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66133+}
66134+#else
66135+static inline int is_module_text(unsigned long addr)
66136+{
66137+ return 0;
66138+}
66139+#endif
df50ba0c 66140+#endif
58c5fc13 66141+
66142 static inline int is_kernel_text(unsigned long addr)
66143 {
66144 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
fe2de317 66145@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66146
66147 static inline int is_kernel(unsigned long addr)
66148 {
ae4e228f 66149+
66150+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66151+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
58c5fc13 66152+ return 1;
ae4e228f 66153+
66154+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66155+#else
66156 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
ae4e228f 66157+#endif
58c5fc13 66158+
58c5fc13 66159 return 1;
66a7e928 66160 return in_gate_area_no_mm(addr);
66161 }
66162
66163 static int is_ksym_addr(unsigned long addr)
66164 {
66165+
66166+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66167+ if (is_module_text(addr))
66168+ return 0;
66169+#endif
66170+
66171 if (all_var)
66172 return is_kernel(addr);
66173
fe2de317 66174@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66175
66176 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66177 {
66178- iter->name[0] = '\0';
66179 iter->nameoff = get_symbol_offset(new_pos);
66180 iter->pos = new_pos;
66181 }
fe2de317 66182@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66183 {
66184 struct kallsym_iter *iter = m->private;
66185
66186+#ifdef CONFIG_GRKERNSEC_HIDESYM
66187+ if (current_uid())
66188+ return 0;
66189+#endif
66190+
66191 /* Some debugging symbols have no name. Ignore them. */
66192 if (!iter->name[0])
66193 return 0;
fe2de317 66194@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66195 struct kallsym_iter *iter;
66196 int ret;
66197
66198- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66199+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66200 if (!iter)
66201 return -ENOMEM;
66202 reset_iter(iter, 0);
66203diff --git a/kernel/kexec.c b/kernel/kexec.c
66204index 296fbc8..84cb857 100644
66205--- a/kernel/kexec.c
66206+++ b/kernel/kexec.c
66207@@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66208 unsigned long flags)
66209 {
66210 struct compat_kexec_segment in;
66211- struct kexec_segment out, __user *ksegments;
66212+ struct kexec_segment out;
66213+ struct kexec_segment __user *ksegments;
66214 unsigned long i, result;
66215
66216 /* Don't allow clients that don't understand the native
66217diff --git a/kernel/kmod.c b/kernel/kmod.c
66218index a4bea97..7a1ae9a 100644
66219--- a/kernel/kmod.c
66220+++ b/kernel/kmod.c
66221@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
66222 * If module auto-loading support is disabled then this function
66223 * becomes a no-operation.
66224 */
66225-int __request_module(bool wait, const char *fmt, ...)
66226+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66227 {
66228- va_list args;
66229 char module_name[MODULE_NAME_LEN];
66230 unsigned int max_modprobes;
66231 int ret;
66232- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
66233+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
66234 static char *envp[] = { "HOME=/",
66235 "TERM=linux",
66236 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
fe2de317 66237@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
66238 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66239 static int kmod_loop_msg;
66240
66241- va_start(args, fmt);
66242- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66243- va_end(args);
66244+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66245 if (ret >= MODULE_NAME_LEN)
66246 return -ENAMETOOLONG;
66247
fe2de317 66248@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
66249 if (ret)
66250 return ret;
66251
66252+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66253+ if (!current_uid()) {
66254+ /* hack to workaround consolekit/udisks stupidity */
66255+ read_lock(&tasklist_lock);
66256+ if (!strcmp(current->comm, "mount") &&
66257+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66258+ read_unlock(&tasklist_lock);
66259+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66260+ return -EPERM;
66261+ }
66262+ read_unlock(&tasklist_lock);
66263+ }
66264+#endif
66265+
66266 /* If modprobe needs a service that is in a module, we get a recursive
66267 * loop. Limit the number of running kmod threads to max_threads/2 or
66268 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
fe2de317 66269@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
66270 atomic_dec(&kmod_concurrent);
66271 return ret;
66272 }
66273+
66274+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66275+{
66276+ va_list args;
66277+ int ret;
66278+
66279+ va_start(args, fmt);
66280+ ret = ____request_module(wait, module_param, fmt, args);
66281+ va_end(args);
66282+
66283+ return ret;
66284+}
66285+
66286+int __request_module(bool wait, const char *fmt, ...)
66287+{
66288+ va_list args;
66289+ int ret;
66290+
66291+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66292+ if (current_uid()) {
66293+ char module_param[MODULE_NAME_LEN];
66294+
66295+ memset(module_param, 0, sizeof(module_param));
66296+
66297+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66298+
66299+ va_start(args, fmt);
66300+ ret = ____request_module(wait, module_param, fmt, args);
66301+ va_end(args);
66302+
66303+ return ret;
66304+ }
66305+#endif
66306+
66307+ va_start(args, fmt);
66308+ ret = ____request_module(wait, NULL, fmt, args);
66309+ va_end(args);
66310+
66311+ return ret;
66312+}
66313+
66314 EXPORT_SYMBOL(__request_module);
66315 #endif /* CONFIG_MODULES */
66316
66317@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
66318 *
66319 * Thus the __user pointer cast is valid here.
66320 */
66321- sys_wait4(pid, (int __user *)&ret, 0, NULL);
66322+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66323
66324 /*
66325 * If ret is 0, either ____call_usermodehelper failed and the
66326diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66327index b30fd54..11821ec 100644
66328--- a/kernel/kprobes.c
66329+++ b/kernel/kprobes.c
66330@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66331 * kernel image and loaded module images reside. This is required
66332 * so x86_64 can correctly handle the %rip-relative fixups.
66333 */
66334- kip->insns = module_alloc(PAGE_SIZE);
66335+ kip->insns = module_alloc_exec(PAGE_SIZE);
66336 if (!kip->insns) {
66337 kfree(kip);
66338 return NULL;
fe2de317 66339@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
ae4e228f 66340 */
df50ba0c 66341 if (!list_is_singular(&kip->list)) {
ae4e228f 66342 list_del(&kip->list);
66343- module_free(NULL, kip->insns);
66344+ module_free_exec(NULL, kip->insns);
66345 kfree(kip);
66346 }
66347 return 1;
6e9df6a3 66348@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
66349 {
66350 int i, err = 0;
66351 unsigned long offset = 0, size = 0;
66352- char *modname, namebuf[128];
66353+ char *modname, namebuf[KSYM_NAME_LEN];
66354 const char *symbol_name;
66355 void *addr;
66356 struct kprobe_blackpoint *kb;
fe2de317 66357@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66358 const char *sym = NULL;
66359 unsigned int i = *(loff_t *) v;
66360 unsigned long offset = 0;
66361- char *modname, namebuf[128];
66362+ char *modname, namebuf[KSYM_NAME_LEN];
66363
66364 head = &kprobe_table[i];
66365 preempt_disable();
66366diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66367index 91d67ce..ac259df 100644
66368--- a/kernel/lockdep.c
66369+++ b/kernel/lockdep.c
15a11c5b 66370@@ -583,6 +583,10 @@ static int static_obj(void *obj)
66371 end = (unsigned long) &_end,
66372 addr = (unsigned long) obj;
66373
66374+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f 66375+ start = ktla_ktva(start);
66376+#endif
66377+
66378 /*
66379 * static variable?
66380 */
fe2de317 66381@@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66382 if (!static_obj(lock->key)) {
66383 debug_locks_off();
66384 printk("INFO: trying to register non-static key.\n");
66385+ printk("lock:%pS key:%pS.\n", lock, lock->key);
66386 printk("the code is fine but needs lockdep annotation.\n");
66387 printk("turning off the locking correctness validator.\n");
66388 dump_stack();
fe2de317 66389@@ -2948,7 +2953,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66390 if (!class)
66391 return 0;
66392 }
66393- atomic_inc((atomic_t *)&class->ops);
66394+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66395 if (very_verbose(class)) {
66396 printk("\nacquire class [%p] %s", class->key, class->name);
66397 if (class->name_version > 1)
66398diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66399index 71edd2f..e0542a5 100644
66400--- a/kernel/lockdep_proc.c
66401+++ b/kernel/lockdep_proc.c
66402@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66403
66404 static void print_name(struct seq_file *m, struct lock_class *class)
66405 {
66406- char str[128];
66407+ char str[KSYM_NAME_LEN];
66408 const char *name = class->name;
66409
66410 if (!name) {
66411diff --git a/kernel/module.c b/kernel/module.c
66412index 04379f92..fba2faf 100644
66413--- a/kernel/module.c
66414+++ b/kernel/module.c
15a11c5b 66415@@ -58,6 +58,7 @@
66416 #include <linux/jump_label.h>
66417 #include <linux/pfn.h>
15a11c5b 66418 #include <linux/bsearch.h>
66419+#include <linux/grsecurity.h>
66420
66421 #define CREATE_TRACE_POINTS
66422 #include <trace/events/module.h>
fe2de317 66423@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
58c5fc13 66424
66425 /* Bounds of module allocation, for speeding __module_address.
66426 * Protected by module_mutex. */
66427-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66428+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66429+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66430
66431 int register_module_notifier(struct notifier_block * nb)
66432 {
fe2de317 66433@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66434 return true;
66435
66436 list_for_each_entry_rcu(mod, &modules, list) {
66437- struct symsearch arr[] = {
66438+ struct symsearch modarr[] = {
66439 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66440 NOT_GPL_ONLY, false },
66441 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
fe2de317 66442@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66443 #endif
66444 };
66445
66446- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66447+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66448 return true;
66449 }
66450 return false;
fe2de317 66451@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66452 static int percpu_modalloc(struct module *mod,
66453 unsigned long size, unsigned long align)
ae4e228f 66454 {
66455- if (align > PAGE_SIZE) {
66456+ if (align-1 >= PAGE_SIZE) {
66457 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
df50ba0c 66458 mod->name, align, PAGE_SIZE);
58c5fc13 66459 align = PAGE_SIZE;
6e9df6a3 66460@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
c52201e0
MT
66461 */
66462 #ifdef CONFIG_SYSFS
66463
66464-#ifdef CONFIG_KALLSYMS
66465+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66466 static inline bool sect_empty(const Elf_Shdr *sect)
66467 {
66468 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
fe2de317 66469@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
16454cff 66470
66471 static void unset_module_core_ro_nx(struct module *mod)
66472 {
66473- set_page_attributes(mod->module_core + mod->core_text_size,
66474- mod->module_core + mod->core_size,
66475+ set_page_attributes(mod->module_core_rw,
66476+ mod->module_core_rw + mod->core_size_rw,
66477 set_memory_x);
66478- set_page_attributes(mod->module_core,
66479- mod->module_core + mod->core_ro_size,
66480+ set_page_attributes(mod->module_core_rx,
66481+ mod->module_core_rx + mod->core_size_rx,
66482 set_memory_rw);
66483 }
16454cff 66484
66485 static void unset_module_init_ro_nx(struct module *mod)
66486 {
66487- set_page_attributes(mod->module_init + mod->init_text_size,
66488- mod->module_init + mod->init_size,
66489+ set_page_attributes(mod->module_init_rw,
66490+ mod->module_init_rw + mod->init_size_rw,
66491 set_memory_x);
66492- set_page_attributes(mod->module_init,
66493- mod->module_init + mod->init_ro_size,
66494+ set_page_attributes(mod->module_init_rx,
66495+ mod->module_init_rx + mod->init_size_rx,
66496 set_memory_rw);
66497 }
66498
6e9df6a3 66499@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
66500
66501 mutex_lock(&module_mutex);
66502 list_for_each_entry_rcu(mod, &modules, list) {
66503- if ((mod->module_core) && (mod->core_text_size)) {
66504- set_page_attributes(mod->module_core,
66505- mod->module_core + mod->core_text_size,
66506+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66507+ set_page_attributes(mod->module_core_rx,
66508+ mod->module_core_rx + mod->core_size_rx,
66509 set_memory_rw);
66510 }
66511- if ((mod->module_init) && (mod->init_text_size)) {
66512- set_page_attributes(mod->module_init,
66513- mod->module_init + mod->init_text_size,
66514+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66515+ set_page_attributes(mod->module_init_rx,
66516+ mod->module_init_rx + mod->init_size_rx,
66517 set_memory_rw);
66518 }
66519 }
6e9df6a3 66520@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
66521
66522 mutex_lock(&module_mutex);
66523 list_for_each_entry_rcu(mod, &modules, list) {
66524- if ((mod->module_core) && (mod->core_text_size)) {
66525- set_page_attributes(mod->module_core,
66526- mod->module_core + mod->core_text_size,
66527+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66528+ set_page_attributes(mod->module_core_rx,
66529+ mod->module_core_rx + mod->core_size_rx,
66530 set_memory_ro);
66531 }
66532- if ((mod->module_init) && (mod->init_text_size)) {
66533- set_page_attributes(mod->module_init,
66534- mod->module_init + mod->init_text_size,
66535+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66536+ set_page_attributes(mod->module_init_rx,
66537+ mod->module_init_rx + mod->init_size_rx,
66538 set_memory_ro);
66539 }
66540 }
fe2de317 66541@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
66542
66543 /* This may be NULL, but that's OK */
15a11c5b 66544 unset_module_init_ro_nx(mod);
66545- module_free(mod, mod->module_init);
66546+ module_free(mod, mod->module_init_rw);
66547+ module_free_exec(mod, mod->module_init_rx);
66548 kfree(mod->args);
df50ba0c 66549 percpu_modfree(mod);
6892158b 66550
66551 /* Free lock-classes: */
66552- lockdep_free_key_range(mod->module_core, mod->core_size);
66553+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66554+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66555
66556 /* Finally, free the core (containing the module structure) */
15a11c5b 66557 unset_module_core_ro_nx(mod);
66558- module_free(mod, mod->module_core);
66559+ module_free_exec(mod, mod->module_core_rx);
66560+ module_free(mod, mod->module_core_rw);
58c5fc13 66561
66562 #ifdef CONFIG_MPU
66563 update_protections(current->mm);
fe2de317 66564@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66565 unsigned int i;
66566 int ret = 0;
66567 const struct kernel_symbol *ksym;
66568+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66569+ int is_fs_load = 0;
66570+ int register_filesystem_found = 0;
8308f9c9 66571+ char *p;
71d190be 66572+
66573+ p = strstr(mod->args, "grsec_modharden_fs");
66574+ if (p) {
66575+ char *endptr = p + strlen("grsec_modharden_fs");
66576+ /* copy \0 as well */
66577+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
71d190be 66578+ is_fs_load = 1;
8308f9c9 66579+ }
66580+#endif
66581
66582 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66583 const char *name = info->strtab + sym[i].st_name;
66584
66585+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66586+ /* it's a real shame this will never get ripped and copied
66587+ upstream! ;(
66588+ */
66589+ if (is_fs_load && !strcmp(name, "register_filesystem"))
66590+ register_filesystem_found = 1;
66591+#endif
66592+
66593 switch (sym[i].st_shndx) {
66594 case SHN_COMMON:
66595 /* We compiled with -fno-common. These are not
fe2de317 66596@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
6892158b 66597 ksym = resolve_symbol_wait(mod, info, name);
58c5fc13 66598 /* Ok if resolved. */
57199397 66599 if (ksym && !IS_ERR(ksym)) {
ae4e228f 66600+ pax_open_kernel();
58c5fc13 66601 sym[i].st_value = ksym->value;
ae4e228f 66602+ pax_close_kernel();
66603 break;
66604 }
66605
fe2de317 66606@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
df50ba0c 66607 secbase = (unsigned long)mod_percpu(mod);
58c5fc13 66608 else
6892158b 66609 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
ae4e228f 66610+ pax_open_kernel();
58c5fc13 66611 sym[i].st_value += secbase;
ae4e228f 66612+ pax_close_kernel();
66613 break;
66614 }
66615 }
66616
66617+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66618+ if (is_fs_load && !register_filesystem_found) {
66619+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66620+ ret = -EPERM;
66621+ }
66622+#endif
66623+
66624 return ret;
66625 }
66626
fe2de317 66627@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
58c5fc13 66628 || s->sh_entsize != ~0UL
6892158b 66629 || strstarts(sname, ".init"))
66630 continue;
66631- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66632+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66633+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66634+ else
66635+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
6892158b 66636 DEBUGP("\t%s\n", name);
58c5fc13 66637 }
66638- switch (m) {
66639- case 0: /* executable */
66640- mod->core_size = debug_align(mod->core_size);
58c5fc13 66641- mod->core_text_size = mod->core_size;
66642- break;
66643- case 1: /* RO: text and ro-data */
66644- mod->core_size = debug_align(mod->core_size);
66645- mod->core_ro_size = mod->core_size;
66646- break;
66647- case 3: /* whole core */
66648- mod->core_size = debug_align(mod->core_size);
66649- break;
66650- }
66651 }
66652
66653 DEBUGP("Init section allocation order:\n");
fe2de317 66654@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
58c5fc13 66655 || s->sh_entsize != ~0UL
6892158b 66656 || !strstarts(sname, ".init"))
66657 continue;
66658- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66659- | INIT_OFFSET_MASK);
66660+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66661+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66662+ else
66663+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66664+ s->sh_entsize |= INIT_OFFSET_MASK;
6892158b 66665 DEBUGP("\t%s\n", sname);
58c5fc13 66666 }
66667- switch (m) {
66668- case 0: /* executable */
66669- mod->init_size = debug_align(mod->init_size);
58c5fc13 66670- mod->init_text_size = mod->init_size;
66671- break;
66672- case 1: /* RO: text and ro-data */
66673- mod->init_size = debug_align(mod->init_size);
66674- mod->init_ro_size = mod->init_size;
66675- break;
66676- case 3: /* whole init */
66677- mod->init_size = debug_align(mod->init_size);
66678- break;
66679- }
66680 }
66681 }
66682
fe2de317 66683@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
58c5fc13 66684
66685 /* Put symbol section at end of init part of module. */
66686 symsect->sh_flags |= SHF_ALLOC;
66687- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66688+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
6892158b
MT
66689 info->index.sym) | INIT_OFFSET_MASK;
66690 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
ae4e228f 66691
fe2de317 66692@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66693 }
66694
66695 /* Append room for core symbols at end of core part. */
66696- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66697- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66698+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66699+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66700
66701 /* Put string table section at end of init part of module. */
66702 strsect->sh_flags |= SHF_ALLOC;
66703- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66704+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66705 info->index.str) | INIT_OFFSET_MASK;
66706 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
66707
66708 /* Append room for core symbols' strings at end of core part. */
66709- info->stroffs = mod->core_size;
66710+ info->stroffs = mod->core_size_rx;
66711 __set_bit(0, info->strmap);
66712- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
66713+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
ae4e228f 66714 }
66715
66716 static void add_kallsyms(struct module *mod, const struct load_info *info)
fe2de317 66717@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66718 /* Make sure we get permanent strtab: don't use info->strtab. */
66719 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
58c5fc13 66720
66721+ pax_open_kernel();
66722+
58c5fc13 66723 /* Set types up while we still have access to sections. */
ae4e228f 66724 for (i = 0; i < mod->num_symtab; i++)
6892158b 66725 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
ae4e228f 66726
66727- mod->core_symtab = dst = mod->module_core + info->symoffs;
66728+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66729 src = mod->symtab;
66730 *dst = *src;
66731 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
fe2de317 66732@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66733 }
66734 mod->core_num_syms = ndst;
66735
66736- mod->core_strtab = s = mod->module_core + info->stroffs;
66737+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66738 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
66739 if (test_bit(i, info->strmap))
ae4e228f 66740 *++s = mod->strtab[i];
58c5fc13 66741+
ae4e228f 66742+ pax_close_kernel();
66743 }
66744 #else
6892158b 66745 static inline void layout_symtab(struct module *mod, struct load_info *info)
fe2de317 66746@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
6e9df6a3 66747 return size == 0 ? NULL : vmalloc_exec(size);
66748 }
66749
66750-static void *module_alloc_update_bounds(unsigned long size)
66751+static void *module_alloc_update_bounds_rw(unsigned long size)
66752 {
66753 void *ret = module_alloc(size);
66754
66755 if (ret) {
57199397 66756 mutex_lock(&module_mutex);
66757 /* Update module bounds. */
66758- if ((unsigned long)ret < module_addr_min)
66759- module_addr_min = (unsigned long)ret;
66760- if ((unsigned long)ret + size > module_addr_max)
66761- module_addr_max = (unsigned long)ret + size;
66762+ if ((unsigned long)ret < module_addr_min_rw)
66763+ module_addr_min_rw = (unsigned long)ret;
66764+ if ((unsigned long)ret + size > module_addr_max_rw)
66765+ module_addr_max_rw = (unsigned long)ret + size;
57199397 66766+ mutex_unlock(&module_mutex);
66767+ }
66768+ return ret;
66769+}
66770+
66771+static void *module_alloc_update_bounds_rx(unsigned long size)
66772+{
66773+ void *ret = module_alloc_exec(size);
66774+
66775+ if (ret) {
57199397 66776+ mutex_lock(&module_mutex);
66777+ /* Update module bounds. */
66778+ if ((unsigned long)ret < module_addr_min_rx)
66779+ module_addr_min_rx = (unsigned long)ret;
66780+ if ((unsigned long)ret + size > module_addr_max_rx)
66781+ module_addr_max_rx = (unsigned long)ret + size;
57199397 66782 mutex_unlock(&module_mutex);
66783 }
66784 return ret;
66785@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
66786 static int check_modinfo(struct module *mod, struct load_info *info)
66787 {
66788 const char *modmagic = get_modinfo(info, "vermagic");
66789+ const char *license = get_modinfo(info, "license");
66790 int err;
66791
66792+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66793+ if (!license || !license_is_gpl_compatible(license))
66794+ return -ENOEXEC;
66795+#endif
66796+
66797 /* This is allowed: modprobe --force will invalidate it. */
66798 if (!modmagic) {
66799 err = try_to_force_load(mod, "bad vermagic");
66800@@ -2495,7 +2538,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66801 }
66802
66803 /* Set up license info based on the info section */
66804- set_license(mod, get_modinfo(info, "license"));
66805+ set_license(mod, license);
66806
66807 return 0;
66808 }
66809@@ -2589,7 +2632,7 @@ static int move_module(struct module *mod, struct load_info *info)
6892158b 66810 void *ptr;
66811
66812 /* Do the allocs. */
66813- ptr = module_alloc_update_bounds(mod->core_size);
66814+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66815 /*
66816 * The pointer to this block is stored in the module structure
66817 * which is inside the block. Just mark it as not being a
fe2de317 66818@@ -2599,23 +2642,50 @@ static int move_module(struct module *mod, struct load_info *info)
66819 if (!ptr)
66820 return -ENOMEM;
66821
66822- memset(ptr, 0, mod->core_size);
66823- mod->module_core = ptr;
66824+ memset(ptr, 0, mod->core_size_rw);
66825+ mod->module_core_rw = ptr;
66826
66827- ptr = module_alloc_update_bounds(mod->init_size);
66828+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66829 /*
66830 * The pointer to this block is stored in the module structure
66831 * which is inside the block. This block doesn't need to be
66832 * scanned as it contains data and code that will be freed
66833 * after the module is initialized.
66834 */
66835- kmemleak_ignore(ptr);
66836- if (!ptr && mod->init_size) {
6892158b 66837- module_free(mod, mod->module_core);
58c5fc13
MT
66838+ kmemleak_not_leak(ptr);
66839+ if (!ptr && mod->init_size_rw) {
6892158b 66840+ module_free(mod, mod->module_core_rw);
16454cff
MT
66841 return -ENOMEM;
66842 }
66843- memset(ptr, 0, mod->init_size);
66844- mod->module_init = ptr;
58c5fc13
MT
66845+ memset(ptr, 0, mod->init_size_rw);
66846+ mod->module_init_rw = ptr;
66847+
66848+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66849+ kmemleak_not_leak(ptr);
66850+ if (!ptr) {
6892158b
MT
66851+ module_free(mod, mod->module_init_rw);
66852+ module_free(mod, mod->module_core_rw);
c52201e0
MT
66853+ return -ENOMEM;
66854+ }
58c5fc13 66855+
ae4e228f 66856+ pax_open_kernel();
58c5fc13 66857+ memset(ptr, 0, mod->core_size_rx);
ae4e228f 66858+ pax_close_kernel();
58c5fc13
MT
66859+ mod->module_core_rx = ptr;
66860+
66861+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66862+ kmemleak_not_leak(ptr);
66863+ if (!ptr && mod->init_size_rx) {
6892158b
MT
66864+ module_free_exec(mod, mod->module_core_rx);
66865+ module_free(mod, mod->module_init_rw);
66866+ module_free(mod, mod->module_core_rw);
16454cff
MT
66867+ return -ENOMEM;
66868+ }
58c5fc13 66869+
ae4e228f 66870+ pax_open_kernel();
58c5fc13 66871+ memset(ptr, 0, mod->init_size_rx);
ae4e228f 66872+ pax_close_kernel();
58c5fc13
MT
66873+ mod->module_init_rx = ptr;
66874
66875 /* Transfer each section which specifies SHF_ALLOC */
66876 DEBUGP("final section addresses:\n");
fe2de317 66877@@ -2626,16 +2696,45 @@ static int move_module(struct module *mod, struct load_info *info)
6892158b 66878 if (!(shdr->sh_flags & SHF_ALLOC))
58c5fc13
MT
66879 continue;
66880
6892158b 66881- if (shdr->sh_entsize & INIT_OFFSET_MASK)
58c5fc13 66882- dest = mod->module_init
6892158b 66883- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58c5fc13 66884- else
6892158b
MT
66885- dest = mod->module_core + shdr->sh_entsize;
66886+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66887+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58c5fc13 66888+ dest = mod->module_init_rw
6892158b 66889+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58c5fc13
MT
66890+ else
66891+ dest = mod->module_init_rx
6892158b 66892+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58c5fc13 66893+ } else {
6892158b
MT
66894+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66895+ dest = mod->module_core_rw + shdr->sh_entsize;
58c5fc13 66896+ else
6892158b 66897+ dest = mod->module_core_rx + shdr->sh_entsize;
58c5fc13
MT
66898+ }
66899+
6892158b
MT
66900+ if (shdr->sh_type != SHT_NOBITS) {
66901+
58c5fc13 66902+#ifdef CONFIG_PAX_KERNEXEC
bc901d79
MT
66903+#ifdef CONFIG_X86_64
66904+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66905+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66906+#endif
6892158b 66907+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
ae4e228f 66908+ pax_open_kernel();
6892158b 66909+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
ae4e228f 66910+ pax_close_kernel();
58c5fc13
MT
66911+ } else
66912+#endif
6892158b
MT
66913
66914- if (shdr->sh_type != SHT_NOBITS)
66915 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58c5fc13
MT
66916+ }
66917 /* Update sh_addr to point to copy in image. */
6892158b 66918- shdr->sh_addr = (unsigned long)dest;
58c5fc13
MT
66919+
66920+#ifdef CONFIG_PAX_KERNEXEC
6892158b
MT
66921+ if (shdr->sh_flags & SHF_EXECINSTR)
66922+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
58c5fc13
MT
66923+ else
66924+#endif
66925+
6892158b
MT
66926+ shdr->sh_addr = (unsigned long)dest;
66927 DEBUGP("\t0x%lx %s\n",
66928 shdr->sh_addr, info->secstrings + shdr->sh_name);
58c5fc13 66929 }
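
The routing rule applied above to each SHF_ALLOC section is: writable (or non-allocated) sections land in the RW block, read-only and executable ones in the RX block. A compilable sketch of that decision using the standard ELF flag macros (the enum and function names are made up, not taken from the patch):

#include <elf.h>
#include <stdint.h>
#include <stdio.h>

enum region { REGION_RW, REGION_RX };

/* Writable (or non-allocated) sections belong to the RW block,
 * read-only and executable ones to the RX block. */
static enum region pick_region(uint64_t sh_flags)
{
        if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
                return REGION_RW;
        return REGION_RX;
}

int main(void)
{
        printf(".text   -> %s\n", pick_region(SHF_ALLOC | SHF_EXECINSTR) == REGION_RX ? "rx" : "rw");
        printf(".rodata -> %s\n", pick_region(SHF_ALLOC) == REGION_RX ? "rx" : "rw");
        printf(".data   -> %s\n", pick_region(SHF_ALLOC | SHF_WRITE) == REGION_RX ? "rx" : "rw");
        return 0;
}
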
fe2de317 66930@@ -2686,12 +2785,12 @@ static void flush_module_icache(const struct module *mod)
58c5fc13
MT
66931 * Do it before processing of module parameters, so the module
66932 * can provide parameter accessor functions of its own.
66933 */
66934- if (mod->module_init)
66935- flush_icache_range((unsigned long)mod->module_init,
66936- (unsigned long)mod->module_init
66937- + mod->init_size);
66938- flush_icache_range((unsigned long)mod->module_core,
66939- (unsigned long)mod->module_core + mod->core_size);
66940+ if (mod->module_init_rx)
66941+ flush_icache_range((unsigned long)mod->module_init_rx,
66942+ (unsigned long)mod->module_init_rx
66943+ + mod->init_size_rx);
66944+ flush_icache_range((unsigned long)mod->module_core_rx,
66945+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
66946
66947 set_fs(old_fs);
6892158b 66948 }
fe2de317 66949@@ -2771,8 +2870,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
6892158b
MT
66950 {
66951 kfree(info->strmap);
66952 percpu_modfree(mod);
58c5fc13 66953- module_free(mod, mod->module_init);
58c5fc13
MT
66954- module_free(mod, mod->module_core);
66955+ module_free_exec(mod, mod->module_init_rx);
58c5fc13 66956+ module_free_exec(mod, mod->module_core_rx);
58c5fc13 66957+ module_free(mod, mod->module_init_rw);
58c5fc13 66958+ module_free(mod, mod->module_core_rw);
6892158b
MT
66959 }
66960
6e9df6a3 66961 int __weak module_finalize(const Elf_Ehdr *hdr,
fe2de317 66962@@ -2836,9 +2937,38 @@ static struct module *load_module(void __user *umod,
71d190be
MT
66963 if (err)
66964 goto free_unload;
66965
66966+ /* Now copy in args */
66967+ mod->args = strndup_user(uargs, ~0UL >> 1);
66968+ if (IS_ERR(mod->args)) {
66969+ err = PTR_ERR(mod->args);
66970+ goto free_unload;
66971+ }
66972+
66973 /* Set up MODINFO_ATTR fields */
66974 setup_modinfo(mod, &info);
66975
66976+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66977+ {
66978+ char *p, *p2;
66979+
66980+ if (strstr(mod->args, "grsec_modharden_netdev")) {
 66981+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
66982+ err = -EPERM;
66983+ goto free_modinfo;
66984+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66985+ p += strlen("grsec_modharden_normal");
66986+ p2 = strstr(p, "_");
66987+ if (p2) {
66988+ *p2 = '\0';
66989+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66990+ *p2 = '_';
66991+ }
66992+ err = -EPERM;
66993+ goto free_modinfo;
66994+ }
66995+ }
66996+#endif
66997+
66998 /* Fix up syms, so that st_value is a pointer to location. */
66999 err = simplify_symbols(mod, &info);
67000 if (err < 0)
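
The MODHARDEN block above scans the module arguments for markers injected by the auto-load path and, in the "normal" case, pulls the requesting uid out of the string before logging the denial. A user-space sketch of that parsing, assuming an argument of the form grsec_modharden_normal<uid>_ as implied by the strstr logic (the helper name is hypothetical):

#include <stdio.h>
#include <string.h>

/* Copies the uid token that follows the marker into uid[]; returns 1 on match.
 * The "uid ends at the next '_'" format is an assumption mirroring the hunk. */
static int modharden_uid(const char *args, char *uid, size_t len)
{
        static const char marker[] = "grsec_modharden_normal";
        const char *p = strstr(args, marker);
        size_t n;

        if (!p)
                return 0;
        p += strlen(marker);
        n = strcspn(p, "_");
        if (n >= len)
                n = len - 1;
        memcpy(uid, p, n);
        uid[n] = '\0';
        return 1;
}

int main(void)
{
        char uid[10];

        if (modharden_uid("opt=1 grsec_modharden_normal1000_", uid, sizeof(uid)))
                printf("denied kernel module auto-load by uid %s\n", uid);
        return 0;
}
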
fe2de317 67001@@ -2854,13 +2984,6 @@ static struct module *load_module(void __user *umod,
71d190be
MT
67002
67003 flush_module_icache(mod);
67004
67005- /* Now copy in args */
67006- mod->args = strndup_user(uargs, ~0UL >> 1);
67007- if (IS_ERR(mod->args)) {
67008- err = PTR_ERR(mod->args);
67009- goto free_arch_cleanup;
67010- }
67011-
67012 /* Mark state as coming so strong_try_module_get() ignores us. */
67013 mod->state = MODULE_STATE_COMING;
67014
fe2de317 67015@@ -2920,11 +3043,10 @@ static struct module *load_module(void __user *umod,
71d190be
MT
67016 unlock:
67017 mutex_unlock(&module_mutex);
67018 synchronize_sched();
67019- kfree(mod->args);
67020- free_arch_cleanup:
67021 module_arch_cleanup(mod);
67022 free_modinfo:
67023 free_modinfo(mod);
67024+ kfree(mod->args);
67025 free_unload:
67026 module_unload_free(mod);
67027 free_module:
fe2de317 67028@@ -2965,16 +3087,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
16454cff
MT
67029 MODULE_STATE_COMING, mod);
67030
67031 /* Set RO and NX regions for core */
67032- set_section_ro_nx(mod->module_core,
67033- mod->core_text_size,
67034- mod->core_ro_size,
67035- mod->core_size);
67036+ set_section_ro_nx(mod->module_core_rx,
67037+ mod->core_size_rx,
67038+ mod->core_size_rx,
67039+ mod->core_size_rx);
67040
67041 /* Set RO and NX regions for init */
67042- set_section_ro_nx(mod->module_init,
67043- mod->init_text_size,
67044- mod->init_ro_size,
67045- mod->init_size);
67046+ set_section_ro_nx(mod->module_init_rx,
67047+ mod->init_size_rx,
67048+ mod->init_size_rx,
67049+ mod->init_size_rx);
67050
67051 do_mod_ctors(mod);
67052 /* Start the module */
fe2de317 67053@@ -3020,11 +3142,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
ae4e228f
MT
67054 mod->strtab = mod->core_strtab;
67055 #endif
15a11c5b 67056 unset_module_init_ro_nx(mod);
58c5fc13
MT
67057- module_free(mod, mod->module_init);
67058- mod->module_init = NULL;
67059- mod->init_size = 0;
15a11c5b 67060- mod->init_ro_size = 0;
58c5fc13
MT
67061- mod->init_text_size = 0;
67062+ module_free(mod, mod->module_init_rw);
67063+ module_free_exec(mod, mod->module_init_rx);
67064+ mod->module_init_rw = NULL;
67065+ mod->module_init_rx = NULL;
67066+ mod->init_size_rw = 0;
67067+ mod->init_size_rx = 0;
67068 mutex_unlock(&module_mutex);
67069
67070 return 0;
fe2de317 67071@@ -3055,10 +3178,16 @@ static const char *get_ksymbol(struct module *mod,
58c5fc13
MT
67072 unsigned long nextval;
67073
67074 /* At worse, next value is at end of module */
67075- if (within_module_init(addr, mod))
67076- nextval = (unsigned long)mod->module_init+mod->init_text_size;
67077+ if (within_module_init_rx(addr, mod))
67078+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67079+ else if (within_module_init_rw(addr, mod))
67080+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67081+ else if (within_module_core_rx(addr, mod))
67082+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67083+ else if (within_module_core_rw(addr, mod))
67084+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67085 else
67086- nextval = (unsigned long)mod->module_core+mod->core_text_size;
67087+ return NULL;
67088
66a7e928 67089 /* Scan for closest preceding symbol, and next symbol. (ELF
58c5fc13 67090 starts real symbols at 1). */
fe2de317 67091@@ -3304,7 +3433,7 @@ static int m_show(struct seq_file *m, void *p)
58c5fc13
MT
67092 char buf[8];
67093
67094 seq_printf(m, "%s %u",
67095- mod->name, mod->init_size + mod->core_size);
67096+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67097 print_unload_info(m, mod);
67098
67099 /* Informative for users. */
fe2de317 67100@@ -3313,7 +3442,7 @@ static int m_show(struct seq_file *m, void *p)
58c5fc13
MT
67101 mod->state == MODULE_STATE_COMING ? "Loading":
67102 "Live");
67103 /* Used by oprofile and other similar tools. */
66a7e928
MT
67104- seq_printf(m, " 0x%pK", mod->module_core);
67105+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58c5fc13
MT
67106
67107 /* Taints info */
67108 if (mod->taints)
fe2de317 67109@@ -3349,7 +3478,17 @@ static const struct file_operations proc_modules_operations = {
58c5fc13
MT
67110
67111 static int __init proc_modules_init(void)
67112 {
67113+#ifndef CONFIG_GRKERNSEC_HIDESYM
67114+#ifdef CONFIG_GRKERNSEC_PROC_USER
67115+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67116+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67117+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67118+#else
67119 proc_create("modules", 0, NULL, &proc_modules_operations);
67120+#endif
67121+#else
67122+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67123+#endif
67124 return 0;
67125 }
67126 module_init(proc_modules_init);
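
The #ifdef ladder above picks the mode bits for /proc/modules from the grsecurity proc-restriction options. A small sketch of the same selection as a plain function (the flag parameters stand in for the Kconfig symbols; the 0444 fallback is an assumption about what procfs uses when the original code passes mode 0):

#include <stdio.h>
#include <sys/stat.h>

static unsigned int modules_proc_mode(int hidesym, int proc_user, int proc_usergroup)
{
        if (hidesym || proc_user)
                return S_IRUSR;               /* readable by root only */
        if (proc_usergroup)
                return S_IRUSR | S_IRGRP;     /* root plus the configured group */
        return 0444;                          /* assumed procfs default for mode 0 */
}

int main(void)
{
        printf("%o\n", modules_proc_mode(0, 0, 1));   /* prints 440 */
        return 0;
}
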
fe2de317 67127@@ -3408,12 +3547,12 @@ struct module *__module_address(unsigned long addr)
58c5fc13
MT
67128 {
67129 struct module *mod;
67130
67131- if (addr < module_addr_min || addr > module_addr_max)
67132+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67133+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
67134 return NULL;
67135
67136 list_for_each_entry_rcu(mod, &modules, list)
67137- if (within_module_core(addr, mod)
67138- || within_module_init(addr, mod))
67139+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
67140 return mod;
67141 return NULL;
67142 }
fe2de317 67143@@ -3447,11 +3586,20 @@ bool is_module_text_address(unsigned long addr)
58c5fc13
MT
67144 */
67145 struct module *__module_text_address(unsigned long addr)
67146 {
67147- struct module *mod = __module_address(addr);
67148+ struct module *mod;
67149+
67150+#ifdef CONFIG_X86_32
67151+ addr = ktla_ktva(addr);
67152+#endif
67153+
67154+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67155+ return NULL;
67156+
67157+ mod = __module_address(addr);
67158+
67159 if (mod) {
67160 /* Make sure it's within the text section. */
67161- if (!within(addr, mod->module_init, mod->init_text_size)
67162- && !within(addr, mod->module_core, mod->core_text_size))
67163+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67164 mod = NULL;
67165 }
67166 return mod;
fe2de317
MT
67167diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67168index 73da83a..fe46e99 100644
67169--- a/kernel/mutex-debug.c
67170+++ b/kernel/mutex-debug.c
67171@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
71d190be
MT
67172 }
67173
67174 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67175- struct thread_info *ti)
67176+ struct task_struct *task)
67177 {
67178 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67179
67180 /* Mark the current thread as blocked on the lock: */
67181- ti->task->blocked_on = waiter;
67182+ task->blocked_on = waiter;
67183 }
67184
67185 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67186- struct thread_info *ti)
67187+ struct task_struct *task)
67188 {
67189 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67190- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67191- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67192- ti->task->blocked_on = NULL;
67193+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
66a7e928 67194+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
71d190be
MT
67195+ task->blocked_on = NULL;
67196
67197 list_del_init(&waiter->list);
67198 waiter->task = NULL;
fe2de317
MT
67199diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67200index 0799fd3..d06ae3b 100644
67201--- a/kernel/mutex-debug.h
67202+++ b/kernel/mutex-debug.h
67203@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
71d190be
MT
67204 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67205 extern void debug_mutex_add_waiter(struct mutex *lock,
67206 struct mutex_waiter *waiter,
67207- struct thread_info *ti);
67208+ struct task_struct *task);
67209 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67210- struct thread_info *ti);
67211+ struct task_struct *task);
67212 extern void debug_mutex_unlock(struct mutex *lock);
67213 extern void debug_mutex_init(struct mutex *lock, const char *name,
67214 struct lock_class_key *key);
fe2de317
MT
67215diff --git a/kernel/mutex.c b/kernel/mutex.c
67216index d607ed5..58d0a52 100644
67217--- a/kernel/mutex.c
67218+++ b/kernel/mutex.c
67219@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67220 spin_lock_mutex(&lock->wait_lock, flags);
67221
67222 debug_mutex_lock_common(lock, &waiter);
67223- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67224+ debug_mutex_add_waiter(lock, &waiter, task);
67225
67226 /* add waiting tasks to the end of the waitqueue (FIFO): */
67227 list_add_tail(&waiter.list, &lock->wait_list);
67228@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67229 * TASK_UNINTERRUPTIBLE case.)
67230 */
67231 if (unlikely(signal_pending_state(state, task))) {
67232- mutex_remove_waiter(lock, &waiter,
67233- task_thread_info(task));
67234+ mutex_remove_waiter(lock, &waiter, task);
67235 mutex_release(&lock->dep_map, 1, ip);
67236 spin_unlock_mutex(&lock->wait_lock, flags);
67237
67238@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67239 done:
67240 lock_acquired(&lock->dep_map, ip);
67241 /* got the lock - rejoice! */
67242- mutex_remove_waiter(lock, &waiter, current_thread_info());
67243+ mutex_remove_waiter(lock, &waiter, task);
67244 mutex_set_owner(lock);
67245
67246 /* set it to 0 if there are no waiters left: */
67247diff --git a/kernel/padata.c b/kernel/padata.c
67248index b91941d..0871d60 100644
67249--- a/kernel/padata.c
67250+++ b/kernel/padata.c
67251@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
8308f9c9
MT
67252 padata->pd = pd;
67253 padata->cb_cpu = cb_cpu;
67254
67255- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
67256- atomic_set(&pd->seq_nr, -1);
67257+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
67258+ atomic_set_unchecked(&pd->seq_nr, -1);
67259
67260- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
67261+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
67262
67263 target_cpu = padata_cpu_hash(padata);
67264 queue = per_cpu_ptr(pd->pqueue, target_cpu);
fe2de317 67265@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
8308f9c9
MT
67266 padata_init_pqueues(pd);
67267 padata_init_squeues(pd);
67268 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
67269- atomic_set(&pd->seq_nr, -1);
67270+ atomic_set_unchecked(&pd->seq_nr, -1);
67271 atomic_set(&pd->reorder_objects, 0);
67272 atomic_set(&pd->refcnt, 0);
67273 pd->pinst = pinst;
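
padata's sequence number is switched to atomic_unchecked_t, the PaX/grsecurity type for counters that are expected to wrap, so that REFCOUNT overflow detection only guards real reference counts. A rough sketch of what the type and its accessors amount to, written with GCC atomic builtins; the real definitions in the patch are per-architecture and differ from this:

typedef struct {
        int counter;
} atomic_unchecked_t;

#define ATOMIC_INIT_UNCHECKED(i)  { (i) }

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
        __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
        return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
        atomic_unchecked_t seq_nr = ATOMIC_INIT_UNCHECKED(-1);

        return atomic_inc_return_unchecked(&seq_nr);   /* wraps freely, no refcount check */
}
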
fe2de317
MT
67274diff --git a/kernel/panic.c b/kernel/panic.c
67275index d7bb697..9ef9f19 100644
67276--- a/kernel/panic.c
67277+++ b/kernel/panic.c
67278@@ -371,7 +371,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
bc901d79
MT
67279 const char *board;
67280
67281 printk(KERN_WARNING "------------[ cut here ]------------\n");
67282- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67283+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67284 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67285 if (board)
67286 printk(KERN_WARNING "Hardware name: %s\n", board);
6e9df6a3 67287@@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58c5fc13
MT
67288 */
67289 void __stack_chk_fail(void)
67290 {
67291- panic("stack-protector: Kernel stack is corrupted in: %p\n",
67292+ dump_stack();
bc901d79 67293+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58c5fc13
MT
67294 __builtin_return_address(0));
67295 }
67296 EXPORT_SYMBOL(__stack_chk_fail);
fe2de317
MT
67297diff --git a/kernel/pid.c b/kernel/pid.c
67298index e432057..a2b2ac5 100644
67299--- a/kernel/pid.c
67300+++ b/kernel/pid.c
58c5fc13
MT
67301@@ -33,6 +33,7 @@
67302 #include <linux/rculist.h>
67303 #include <linux/bootmem.h>
67304 #include <linux/hash.h>
67305+#include <linux/security.h>
67306 #include <linux/pid_namespace.h>
67307 #include <linux/init_task.h>
67308 #include <linux/syscalls.h>
fe2de317 67309@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
58c5fc13
MT
67310
67311 int pid_max = PID_MAX_DEFAULT;
67312
67313-#define RESERVED_PIDS 300
67314+#define RESERVED_PIDS 500
67315
67316 int pid_max_min = RESERVED_PIDS + 1;
67317 int pid_max_max = PID_MAX_LIMIT;
6e9df6a3 67318@@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
58c5fc13
MT
67319 */
67320 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67321 {
58c5fc13 67322+ struct task_struct *task;
bc901d79
MT
67323+
67324 rcu_lockdep_assert(rcu_read_lock_held());
67325- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58c5fc13
MT
67326+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67327+
67328+ if (gr_pid_is_chrooted(task))
67329+ return NULL;
67330+
67331+ return task;
67332 }
67333
67334 struct task_struct *find_task_by_vpid(pid_t vnr)
fe2de317 67335@@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
15a11c5b
MT
67336 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67337 }
67338
67339+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67340+{
67341+ rcu_lockdep_assert(rcu_read_lock_held());
67342+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67343+}
67344+
67345 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67346 {
67347 struct pid *pid;
fe2de317
MT
67348diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67349index 640ded8..3dafb85 100644
67350--- a/kernel/posix-cpu-timers.c
67351+++ b/kernel/posix-cpu-timers.c
58c5fc13
MT
67352@@ -6,6 +6,7 @@
67353 #include <linux/posix-timers.h>
67354 #include <linux/errno.h>
67355 #include <linux/math64.h>
67356+#include <linux/security.h>
67357 #include <asm/uaccess.h>
67358 #include <linux/kernel_stat.h>
ae4e228f 67359 #include <trace/events/timer.h>
6e9df6a3 67360@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
66a7e928
MT
67361
67362 static __init int init_posix_cpu_timers(void)
67363 {
67364- struct k_clock process = {
15a11c5b 67365+ static struct k_clock process = {
66a7e928
MT
67366 .clock_getres = process_cpu_clock_getres,
67367 .clock_get = process_cpu_clock_get,
67368 .timer_create = process_cpu_timer_create,
67369 .nsleep = process_cpu_nsleep,
67370 .nsleep_restart = process_cpu_nsleep_restart,
67371 };
67372- struct k_clock thread = {
15a11c5b 67373+ static struct k_clock thread = {
66a7e928
MT
67374 .clock_getres = thread_cpu_clock_getres,
67375 .clock_get = thread_cpu_clock_get,
67376 .timer_create = thread_cpu_timer_create,
fe2de317
MT
67377diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67378index 4556182..9335419 100644
67379--- a/kernel/posix-timers.c
67380+++ b/kernel/posix-timers.c
66a7e928 67381@@ -43,6 +43,7 @@
bc901d79 67382 #include <linux/idr.h>
66a7e928 67383 #include <linux/posix-clock.h>
bc901d79
MT
67384 #include <linux/posix-timers.h>
67385+#include <linux/grsecurity.h>
67386 #include <linux/syscalls.h>
67387 #include <linux/wait.h>
67388 #include <linux/workqueue.h>
15a11c5b
MT
67389@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67390 * which we beg off on and pass to do_sys_settimeofday().
67391 */
67392
67393-static struct k_clock posix_clocks[MAX_CLOCKS];
67394+static struct k_clock *posix_clocks[MAX_CLOCKS];
67395
67396 /*
67397 * These ones are defined below.
fe2de317 67398@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66a7e928
MT
67399 */
67400 static __init int init_posix_timers(void)
67401 {
67402- struct k_clock clock_realtime = {
15a11c5b 67403+ static struct k_clock clock_realtime = {
66a7e928
MT
67404 .clock_getres = hrtimer_get_res,
67405 .clock_get = posix_clock_realtime_get,
67406 .clock_set = posix_clock_realtime_set,
fe2de317 67407@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66a7e928
MT
67408 .timer_get = common_timer_get,
67409 .timer_del = common_timer_del,
67410 };
67411- struct k_clock clock_monotonic = {
15a11c5b 67412+ static struct k_clock clock_monotonic = {
66a7e928
MT
67413 .clock_getres = hrtimer_get_res,
67414 .clock_get = posix_ktime_get_ts,
67415 .nsleep = common_nsleep,
fe2de317 67416@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66a7e928
MT
67417 .timer_get = common_timer_get,
67418 .timer_del = common_timer_del,
67419 };
67420- struct k_clock clock_monotonic_raw = {
15a11c5b 67421+ static struct k_clock clock_monotonic_raw = {
66a7e928
MT
67422 .clock_getres = hrtimer_get_res,
67423 .clock_get = posix_get_monotonic_raw,
67424 };
67425- struct k_clock clock_realtime_coarse = {
15a11c5b 67426+ static struct k_clock clock_realtime_coarse = {
66a7e928
MT
67427 .clock_getres = posix_get_coarse_res,
67428 .clock_get = posix_get_realtime_coarse,
67429 };
67430- struct k_clock clock_monotonic_coarse = {
15a11c5b 67431+ static struct k_clock clock_monotonic_coarse = {
66a7e928
MT
67432 .clock_getres = posix_get_coarse_res,
67433 .clock_get = posix_get_monotonic_coarse,
67434 };
67435- struct k_clock clock_boottime = {
15a11c5b 67436+ static struct k_clock clock_boottime = {
66a7e928
MT
67437 .clock_getres = hrtimer_get_res,
67438 .clock_get = posix_get_boottime,
67439 .nsleep = common_nsleep,
fe2de317 67440@@ -272,6 +273,8 @@ static __init int init_posix_timers(void)
66a7e928
MT
67441 .timer_del = common_timer_del,
67442 };
67443
67444+ pax_track_stack();
67445+
67446 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
67447 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
67448 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
fe2de317 67449@@ -473,7 +476,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
15a11c5b
MT
67450 return;
67451 }
66a7e928 67452
15a11c5b
MT
67453- posix_clocks[clock_id] = *new_clock;
67454+ posix_clocks[clock_id] = new_clock;
66a7e928 67455 }
15a11c5b 67456 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
66a7e928 67457
fe2de317 67458@@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
66a7e928 67459 return (id & CLOCKFD_MASK) == CLOCKFD ?
15a11c5b 67460 &clock_posix_dynamic : &clock_posix_cpu;
66a7e928 67461
15a11c5b
MT
67462- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67463+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67464 return NULL;
67465- return &posix_clocks[id];
67466+ return posix_clocks[id];
67467 }
66a7e928 67468
15a11c5b 67469 static int common_timer_create(struct k_itimer *new_timer)
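
Instead of copying each registered k_clock into a writable array, the table above now stores pointers to objects with static storage duration, so the k_clock structures themselves can stay read-only after init and the lookup only needs to handle unregistered slots. A self-contained sketch of that registration/lookup shape (names are illustrative, not the kernel API):

#include <stddef.h>
#include <stdio.h>

struct clock_ops {
        int (*getres)(void);
};

#define MAX_CLOCKS 16
static const struct clock_ops *clock_table[MAX_CLOCKS];

static void register_clock(int id, const struct clock_ops *ops)
{
        if (id >= 0 && id < MAX_CLOCKS)
                clock_table[id] = ops;
}

static const struct clock_ops *lookup_clock(int id)
{
        if (id < 0 || id >= MAX_CLOCKS || !clock_table[id] || !clock_table[id]->getres)
                return NULL;
        return clock_table[id];
}

static int dummy_getres(void) { return 1; }

int main(void)
{
        static const struct clock_ops realtime = { .getres = dummy_getres };

        register_clock(0, &realtime);
        printf("clock 0 %s\n", lookup_clock(0) ? "registered" : "missing");
        return 0;
}
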
fe2de317 67470@@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
bc901d79
MT
67471 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67472 return -EFAULT;
df50ba0c 67473
bc901d79
MT
 67474+	/* only the CLOCK_REALTIME clock can be set; all other clocks
 67475+	   have their clock_set fptr set to a nosettime dummy function.
 67476+	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
 67477+	   call common_clock_set, which calls do_sys_settimeofday, which
 67478+	   we hook.
67479+ */
67480+
66a7e928 67481 return kc->clock_set(which_clock, &new_tp);
bc901d79
MT
67482 }
67483
fe2de317
MT
67484diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67485index d523593..68197a4 100644
67486--- a/kernel/power/poweroff.c
67487+++ b/kernel/power/poweroff.c
67488@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
58c5fc13
MT
67489 .enable_mask = SYSRQ_ENABLE_BOOT,
67490 };
67491
67492-static int pm_sysrq_init(void)
67493+static int __init pm_sysrq_init(void)
67494 {
67495 register_sysrq_key('o', &sysrq_poweroff_op);
67496 return 0;
fe2de317
MT
67497diff --git a/kernel/power/process.c b/kernel/power/process.c
67498index 0cf3a27..5481be4 100644
67499--- a/kernel/power/process.c
67500+++ b/kernel/power/process.c
67501@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
58c5fc13
MT
67502 u64 elapsed_csecs64;
67503 unsigned int elapsed_csecs;
bc901d79 67504 bool wakeup = false;
58c5fc13
MT
67505+ bool timedout = false;
67506
67507 do_gettimeofday(&start);
67508
fe2de317 67509@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
6892158b 67510
ae4e228f 67511 while (true) {
58c5fc13
MT
67512 todo = 0;
67513+ if (time_after(jiffies, end_time))
67514+ timedout = true;
67515 read_lock(&tasklist_lock);
67516 do_each_thread(g, p) {
16454cff 67517 if (frozen(p) || !freezable(p))
fe2de317 67518@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
16454cff
MT
67519 * try_to_stop() after schedule() in ptrace/signal
67520 * stop sees TIF_FREEZE.
58c5fc13
MT
67521 */
67522- if (!task_is_stopped_or_traced(p) &&
67523- !freezer_should_skip(p))
67524+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67525 todo++;
67526+ if (timedout) {
67527+ printk(KERN_ERR "Task refusing to freeze:\n");
67528+ sched_show_task(p);
67529+ }
67530+ }
67531 } while_each_thread(g, p);
67532 read_unlock(&tasklist_lock);
6892158b 67533
fe2de317 67534@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
6892158b
MT
67535 todo += wq_busy;
67536 }
67537
ae4e228f
MT
67538- if (!todo || time_after(jiffies, end_time))
67539+ if (!todo || timedout)
67540 break;
67541
16454cff 67542 if (pm_wakeup_pending()) {
fe2de317
MT
67543diff --git a/kernel/printk.c b/kernel/printk.c
67544index 28a40d8..2411bec 100644
67545--- a/kernel/printk.c
67546+++ b/kernel/printk.c
67547@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
16454cff
MT
67548 if (from_file && type != SYSLOG_ACTION_OPEN)
67549 return 0;
58c5fc13
MT
67550
67551+#ifdef CONFIG_GRKERNSEC_DMESG
16454cff 67552+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58c5fc13
MT
67553+ return -EPERM;
67554+#endif
67555+
16454cff
MT
67556 if (syslog_action_restricted(type)) {
67557 if (capable(CAP_SYSLOG))
67558 return 0;
fe2de317
MT
67559diff --git a/kernel/profile.c b/kernel/profile.c
67560index 961b389..c451353 100644
67561--- a/kernel/profile.c
67562+++ b/kernel/profile.c
8308f9c9
MT
67563@@ -39,7 +39,7 @@ struct profile_hit {
67564 /* Oprofile timer tick hook */
67565 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67566
67567-static atomic_t *prof_buffer;
67568+static atomic_unchecked_t *prof_buffer;
67569 static unsigned long prof_len, prof_shift;
67570
67571 int prof_on __read_mostly;
15a11c5b 67572@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
8308f9c9
MT
67573 hits[i].pc = 0;
67574 continue;
67575 }
67576- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67577+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67578 hits[i].hits = hits[i].pc = 0;
67579 }
67580 }
fe2de317 67581@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
8308f9c9
MT
67582 * Add the current hit(s) and flush the write-queue out
67583 * to the global buffer:
67584 */
67585- atomic_add(nr_hits, &prof_buffer[pc]);
67586+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67587 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67588- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67589+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67590 hits[i].pc = hits[i].hits = 0;
67591 }
67592 out:
fe2de317 67593@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
15a11c5b
MT
67594 {
67595 unsigned long pc;
8308f9c9
MT
67596 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67597- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67598+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67599 }
67600 #endif /* !CONFIG_SMP */
15a11c5b 67601
fe2de317 67602@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
8308f9c9
MT
67603 return -EFAULT;
67604 buf++; p++; count--; read++;
67605 }
67606- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67607+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67608 if (copy_to_user(buf, (void *)pnt, count))
67609 return -EFAULT;
67610 read += count;
fe2de317 67611@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
8308f9c9
MT
67612 }
67613 #endif
67614 profile_discard_flip_buffers();
67615- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67616+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67617 return count;
67618 }
67619
fe2de317
MT
67620diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67621index a70d2a5..cbd4b4f 100644
67622--- a/kernel/ptrace.c
67623+++ b/kernel/ptrace.c
67624@@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
16454cff
MT
67625 return ret;
67626 }
67627
67628-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67629+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
67630+ unsigned int log)
67631 {
67632 const struct cred *cred = current_cred(), *tcred;
67633
fe2de317 67634@@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
66a7e928
MT
67635 cred->gid == tcred->sgid &&
67636 cred->gid == tcred->gid))
67637 goto ok;
67638- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
67639+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
67640+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
67641 goto ok;
67642 rcu_read_unlock();
67643 return -EPERM;
6e9df6a3 67644@@ -196,7 +198,9 @@ ok:
58c5fc13
MT
67645 smp_rmb();
67646 if (task->mm)
67647 dumpable = get_dumpable(task->mm);
66a7e928 67648- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
16454cff 67649+ if (!dumpable &&
66a7e928
MT
67650+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
67651+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58c5fc13
MT
67652 return -EPERM;
67653
ae4e228f 67654 return security_ptrace_access_check(task, mode);
fe2de317 67655@@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
16454cff
MT
67656 {
67657 int err;
67658 task_lock(task);
67659- err = __ptrace_may_access(task, mode);
67660+ err = __ptrace_may_access(task, mode, 0);
67661+ task_unlock(task);
67662+ return !err;
67663+}
67664+
67665+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
67666+{
67667+ int err;
67668+ task_lock(task);
67669+ err = __ptrace_may_access(task, mode, 1);
67670 task_unlock(task);
67671 return !err;
67672 }
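
The ptrace change above splits the permission check into one internal helper that takes a log flag, plus two wrappers, so only the deliberate attach path raises an audited capability check while introspection paths stay silent. A minimal sketch of that wrapper pattern with the credential check stubbed out (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the real dumpable/credential/capability checks. */
static bool target_access_allowed(int target_pid, unsigned int mode)
{
        (void)target_pid;
        (void)mode;
        return false;
}

static int may_access(int target_pid, unsigned int mode, bool log)
{
        if (target_access_allowed(target_pid, mode))
                return 0;
        if (log)
                fprintf(stderr, "ptrace denied: pid %d, mode %u\n", target_pid, mode);
        return -1;
}

/* Silent variant: used by paths that merely probe whether access would work. */
static bool ptrace_may_access(int target_pid, unsigned int mode)
{
        return may_access(target_pid, mode, false) == 0;
}

/* Logging variant: used by the actual attach path. */
static bool ptrace_may_access_log(int target_pid, unsigned int mode)
{
        return may_access(target_pid, mode, true) == 0;
}

int main(void)
{
        if (!ptrace_may_access(1, 1))
                (void)ptrace_may_access_log(1, 1);   /* the attach path would log */
        return 0;
}
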
fe2de317 67673@@ -251,7 +264,7 @@ static int ptrace_attach(struct task_struct *task, long request,
16454cff
MT
67674 goto out;
67675
67676 task_lock(task);
67677- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
67678+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
67679 task_unlock(task);
67680 if (retval)
67681 goto unlock_creds;
fe2de317 67682@@ -266,7 +279,7 @@ static int ptrace_attach(struct task_struct *task, long request,
58c5fc13 67683 task->ptrace = PT_PTRACED;
6e9df6a3
MT
67684 if (seize)
67685 task->ptrace |= PT_SEIZED;
66a7e928
MT
67686- if (task_ns_capable(task, CAP_SYS_PTRACE))
67687+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58c5fc13
MT
67688 task->ptrace |= PT_PTRACE_CAP;
67689
67690 __ptrace_link(task, current);
fe2de317 67691@@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
66a7e928
MT
67692 {
67693 int copied = 0;
67694
67695+ pax_track_stack();
67696+
67697 while (len > 0) {
67698 char buf[128];
67699 int this_len, retval;
fe2de317 67700@@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
ae4e228f
MT
67701 break;
67702 return -EIO;
67703 }
67704- if (copy_to_user(dst, buf, retval))
67705+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67706 return -EFAULT;
67707 copied += retval;
67708 src += retval;
fe2de317 67709@@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
66a7e928
MT
67710 {
67711 int copied = 0;
67712
67713+ pax_track_stack();
67714+
67715 while (len > 0) {
67716 char buf[128];
67717 int this_len, retval;
fe2de317 67718@@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *child, long request,
6e9df6a3 67719 bool seized = child->ptrace & PT_SEIZED;
bc901d79 67720 int ret = -EIO;
6e9df6a3 67721 siginfo_t siginfo, *si;
bc901d79
MT
67722- void __user *datavp = (void __user *) data;
67723+ void __user *datavp = (__force void __user *) data;
67724 unsigned long __user *datalp = datavp;
6e9df6a3 67725 unsigned long flags;
ae4e228f 67726
66a7e928
MT
67727+ pax_track_stack();
67728+
bc901d79 67729 switch (request) {
66a7e928
MT
67730 case PTRACE_PEEKTEXT:
67731 case PTRACE_PEEKDATA:
fe2de317 67732@@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
ae4e228f
MT
67733 goto out;
67734 }
58c5fc13
MT
67735
67736+ if (gr_handle_ptrace(child, request)) {
67737+ ret = -EPERM;
67738+ goto out_put_task_struct;
67739+ }
67740+
6e9df6a3
MT
67741 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67742 ret = ptrace_attach(child, request, data);
ae4e228f
MT
67743 /*
67744 * Some architectures need to do book-keeping after
67745 * a ptrace attach.
67746 */
67747- if (!ret)
67748+ if (!ret) {
67749 arch_ptrace_attach(child);
67750+ gr_audit_ptrace(child);
67751+ }
67752 goto out_put_task_struct;
67753 }
58c5fc13 67754
fe2de317 67755@@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
ae4e228f
MT
67756 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67757 if (copied != sizeof(tmp))
67758 return -EIO;
67759- return put_user(tmp, (unsigned long __user *)data);
67760+ return put_user(tmp, (__force unsigned long __user *)data);
58c5fc13
MT
67761 }
67762
bc901d79 67763 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
fe2de317 67764@@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
66a7e928
MT
67765 siginfo_t siginfo;
67766 int ret;
67767
67768+ pax_track_stack();
67769+
67770 switch (request) {
67771 case PTRACE_PEEKTEXT:
67772 case PTRACE_PEEKDATA:
fe2de317 67773@@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
bc901d79
MT
67774 goto out;
67775 }
67776
67777+ if (gr_handle_ptrace(child, request)) {
67778+ ret = -EPERM;
67779+ goto out_put_task_struct;
67780+ }
67781+
6e9df6a3
MT
67782 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67783 ret = ptrace_attach(child, request, data);
bc901d79
MT
67784 /*
67785 * Some architectures need to do book-keeping after
67786 * a ptrace attach.
67787 */
67788- if (!ret)
67789+ if (!ret) {
67790 arch_ptrace_attach(child);
67791+ gr_audit_ptrace(child);
67792+ }
67793 goto out_put_task_struct;
67794 }
67795
fe2de317
MT
67796diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67797index 98f51b1..30b950c 100644
67798--- a/kernel/rcutorture.c
67799+++ b/kernel/rcutorture.c
67800@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
8308f9c9
MT
67801 { 0 };
67802 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67803 { 0 };
67804-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67805-static atomic_t n_rcu_torture_alloc;
67806-static atomic_t n_rcu_torture_alloc_fail;
67807-static atomic_t n_rcu_torture_free;
67808-static atomic_t n_rcu_torture_mberror;
67809-static atomic_t n_rcu_torture_error;
67810+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67811+static atomic_unchecked_t n_rcu_torture_alloc;
67812+static atomic_unchecked_t n_rcu_torture_alloc_fail;
67813+static atomic_unchecked_t n_rcu_torture_free;
67814+static atomic_unchecked_t n_rcu_torture_mberror;
67815+static atomic_unchecked_t n_rcu_torture_error;
67816 static long n_rcu_torture_boost_ktrerror;
67817 static long n_rcu_torture_boost_rterror;
15a11c5b
MT
67818 static long n_rcu_torture_boost_failure;
67819@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
8308f9c9
MT
67820
67821 spin_lock_bh(&rcu_torture_lock);
67822 if (list_empty(&rcu_torture_freelist)) {
67823- atomic_inc(&n_rcu_torture_alloc_fail);
67824+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67825 spin_unlock_bh(&rcu_torture_lock);
67826 return NULL;
67827 }
67828- atomic_inc(&n_rcu_torture_alloc);
67829+ atomic_inc_unchecked(&n_rcu_torture_alloc);
67830 p = rcu_torture_freelist.next;
67831 list_del_init(p);
67832 spin_unlock_bh(&rcu_torture_lock);
15a11c5b 67833@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
8308f9c9
MT
67834 static void
67835 rcu_torture_free(struct rcu_torture *p)
67836 {
67837- atomic_inc(&n_rcu_torture_free);
67838+ atomic_inc_unchecked(&n_rcu_torture_free);
67839 spin_lock_bh(&rcu_torture_lock);
67840 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67841 spin_unlock_bh(&rcu_torture_lock);
15a11c5b 67842@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
8308f9c9
MT
67843 i = rp->rtort_pipe_count;
67844 if (i > RCU_TORTURE_PIPE_LEN)
67845 i = RCU_TORTURE_PIPE_LEN;
67846- atomic_inc(&rcu_torture_wcount[i]);
67847+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
67848 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67849 rp->rtort_mbtest = 0;
67850 rcu_torture_free(rp);
fe2de317 67851@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
8308f9c9
MT
67852 i = rp->rtort_pipe_count;
67853 if (i > RCU_TORTURE_PIPE_LEN)
67854 i = RCU_TORTURE_PIPE_LEN;
67855- atomic_inc(&rcu_torture_wcount[i]);
67856+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
67857 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67858 rp->rtort_mbtest = 0;
67859 list_del(&rp->rtort_free);
66a7e928 67860@@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
8308f9c9
MT
67861 i = old_rp->rtort_pipe_count;
67862 if (i > RCU_TORTURE_PIPE_LEN)
67863 i = RCU_TORTURE_PIPE_LEN;
67864- atomic_inc(&rcu_torture_wcount[i]);
67865+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
67866 old_rp->rtort_pipe_count++;
67867 cur_ops->deferred_free(old_rp);
67868 }
fe2de317 67869@@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned long unused)
8308f9c9
MT
67870 return;
67871 }
67872 if (p->rtort_mbtest == 0)
67873- atomic_inc(&n_rcu_torture_mberror);
67874+ atomic_inc_unchecked(&n_rcu_torture_mberror);
67875 spin_lock(&rand_lock);
67876 cur_ops->read_delay(&rand);
67877 n_rcu_torture_timers++;
6e9df6a3 67878@@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
8308f9c9
MT
67879 continue;
67880 }
67881 if (p->rtort_mbtest == 0)
67882- atomic_inc(&n_rcu_torture_mberror);
67883+ atomic_inc_unchecked(&n_rcu_torture_mberror);
67884 cur_ops->read_delay(&rand);
67885 preempt_disable();
67886 pipe_count = p->rtort_pipe_count;
6e9df6a3 67887@@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
8308f9c9
MT
67888 rcu_torture_current,
67889 rcu_torture_current_version,
67890 list_empty(&rcu_torture_freelist),
67891- atomic_read(&n_rcu_torture_alloc),
67892- atomic_read(&n_rcu_torture_alloc_fail),
67893- atomic_read(&n_rcu_torture_free),
67894- atomic_read(&n_rcu_torture_mberror),
67895+ atomic_read_unchecked(&n_rcu_torture_alloc),
67896+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67897+ atomic_read_unchecked(&n_rcu_torture_free),
67898+ atomic_read_unchecked(&n_rcu_torture_mberror),
67899 n_rcu_torture_boost_ktrerror,
67900 n_rcu_torture_boost_rterror,
8308f9c9
MT
67901 n_rcu_torture_boost_failure,
67902 n_rcu_torture_boosts,
67903 n_rcu_torture_timers);
67904- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67905+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67906 n_rcu_torture_boost_ktrerror != 0 ||
67907 n_rcu_torture_boost_rterror != 0 ||
15a11c5b 67908 n_rcu_torture_boost_failure != 0)
6e9df6a3 67909@@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
8308f9c9
MT
67910 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67911 if (i > 1) {
67912 cnt += sprintf(&page[cnt], "!!! ");
67913- atomic_inc(&n_rcu_torture_error);
67914+ atomic_inc_unchecked(&n_rcu_torture_error);
67915 WARN_ON_ONCE(1);
67916 }
67917 cnt += sprintf(&page[cnt], "Reader Pipe: ");
6e9df6a3 67918@@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
8308f9c9
MT
67919 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67920 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67921 cnt += sprintf(&page[cnt], " %d",
67922- atomic_read(&rcu_torture_wcount[i]));
67923+ atomic_read_unchecked(&rcu_torture_wcount[i]));
67924 }
67925 cnt += sprintf(&page[cnt], "\n");
67926 if (cur_ops->stats)
6e9df6a3 67927@@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
8308f9c9
MT
67928
67929 if (cur_ops->cleanup)
67930 cur_ops->cleanup();
67931- if (atomic_read(&n_rcu_torture_error))
67932+ if (atomic_read_unchecked(&n_rcu_torture_error))
67933 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67934 else
67935 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
6e9df6a3 67936@@ -1474,17 +1474,17 @@ rcu_torture_init(void)
8308f9c9
MT
67937
67938 rcu_torture_current = NULL;
67939 rcu_torture_current_version = 0;
67940- atomic_set(&n_rcu_torture_alloc, 0);
67941- atomic_set(&n_rcu_torture_alloc_fail, 0);
67942- atomic_set(&n_rcu_torture_free, 0);
67943- atomic_set(&n_rcu_torture_mberror, 0);
67944- atomic_set(&n_rcu_torture_error, 0);
67945+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67946+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67947+ atomic_set_unchecked(&n_rcu_torture_free, 0);
67948+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67949+ atomic_set_unchecked(&n_rcu_torture_error, 0);
67950 n_rcu_torture_boost_ktrerror = 0;
67951 n_rcu_torture_boost_rterror = 0;
8308f9c9
MT
67952 n_rcu_torture_boost_failure = 0;
67953 n_rcu_torture_boosts = 0;
67954 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67955- atomic_set(&rcu_torture_wcount[i], 0);
67956+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67957 for_each_possible_cpu(cpu) {
67958 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67959 per_cpu(rcu_torture_count, cpu)[i] = 0;
fe2de317
MT
67960diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67961index ba06207..85d8ba8 100644
67962--- a/kernel/rcutree.c
67963+++ b/kernel/rcutree.c
15a11c5b
MT
67964@@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
67965 }
67966 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67967 smp_mb__before_atomic_inc(); /* See above. */
67968- atomic_inc(&rdtp->dynticks);
67969+ atomic_inc_unchecked(&rdtp->dynticks);
67970 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67971- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67972+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67973 local_irq_restore(flags);
67974
67975 /* If the interrupt queued a callback, get out of dyntick mode. */
67976@@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
67977 return;
67978 }
67979 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67980- atomic_inc(&rdtp->dynticks);
67981+ atomic_inc_unchecked(&rdtp->dynticks);
67982 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67983 smp_mb__after_atomic_inc(); /* See above. */
67984- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67985+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67986 local_irq_restore(flags);
67987 }
67988
67989@@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
67990 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67991
67992 if (rdtp->dynticks_nmi_nesting == 0 &&
67993- (atomic_read(&rdtp->dynticks) & 0x1))
67994+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67995 return;
67996 rdtp->dynticks_nmi_nesting++;
67997 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67998- atomic_inc(&rdtp->dynticks);
67999+ atomic_inc_unchecked(&rdtp->dynticks);
68000 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68001 smp_mb__after_atomic_inc(); /* See above. */
68002- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68003+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68004 }
68005
68006 /**
68007@@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
68008 return;
68009 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68010 smp_mb__before_atomic_inc(); /* See above. */
68011- atomic_inc(&rdtp->dynticks);
68012+ atomic_inc_unchecked(&rdtp->dynticks);
68013 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68014- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68015+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68016 }
68017
68018 /**
68019@@ -469,7 +469,7 @@ void rcu_irq_exit(void)
68020 */
68021 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68022 {
68023- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68024+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68025 return 0;
68026 }
68027
fe2de317 68028@@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
15a11c5b
MT
68029 unsigned long curr;
68030 unsigned long snap;
68031
68032- curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
68033+ curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68034 snap = (unsigned long)rdp->dynticks_snap;
68035
68036 /*
fe2de317 68037@@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
58c5fc13 68038 /*
ae4e228f 68039 * Do softirq processing for the current CPU.
58c5fc13 68040 */
ae4e228f
MT
68041-static void rcu_process_callbacks(struct softirq_action *unused)
68042+static void rcu_process_callbacks(void)
68043 {
15a11c5b
MT
68044 __rcu_process_callbacks(&rcu_sched_state,
68045 &__get_cpu_var(rcu_sched_data));
fe2de317
MT
68046diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68047index 01b2ccd..4f5d80a 100644
68048--- a/kernel/rcutree.h
68049+++ b/kernel/rcutree.h
15a11c5b
MT
68050@@ -86,7 +86,7 @@
68051 struct rcu_dynticks {
68052 int dynticks_nesting; /* Track irq/process nesting level. */
68053 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68054- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
68055+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
68056 };
68057
68058 /* RCU's kthread states for tracing. */
fe2de317
MT
68059diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68060index 8aafbb8..2fca109 100644
68061--- a/kernel/rcutree_plugin.h
68062+++ b/kernel/rcutree_plugin.h
15a11c5b 68063@@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
bc901d79
MT
68064
68065 /* Clean up and exit. */
68066 smp_mb(); /* ensure expedited GP seen before counter increment. */
68067- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68068+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68069 unlock_mb_ret:
68070 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68071 mb_ret:
fe2de317 68072@@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
8308f9c9
MT
68073
68074 #else /* #ifndef CONFIG_SMP */
68075
68076-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68077-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68078+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68079+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68080
68081 static int synchronize_sched_expedited_cpu_stop(void *data)
68082 {
15a11c5b 68083@@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
68084 int firstsnap, s, snap, trycount = 0;
68085
68086 /* Note that atomic_inc_return() implies full memory barrier. */
68087- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68088+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68089 get_online_cpus();
68090
68091 /*
15a11c5b 68092@@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
68093 }
68094
68095 /* Check to see if someone else did our work for us. */
68096- s = atomic_read(&sync_sched_expedited_done);
68097+ s = atomic_read_unchecked(&sync_sched_expedited_done);
68098 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68099 smp_mb(); /* ensure test happens before caller kfree */
68100 return;
15a11c5b 68101@@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
68102 * grace period works for us.
68103 */
68104 get_online_cpus();
68105- snap = atomic_read(&sync_sched_expedited_started) - 1;
68106+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
68107 smp_mb(); /* ensure read is before try_stop_cpus(). */
68108 }
68109
15a11c5b 68110@@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
68111 * than we did beat us to the punch.
68112 */
68113 do {
68114- s = atomic_read(&sync_sched_expedited_done);
68115+ s = atomic_read_unchecked(&sync_sched_expedited_done);
68116 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68117 smp_mb(); /* ensure test happens before caller kfree */
68118 break;
68119 }
68120- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68121+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68122
68123 put_online_cpus();
68124 }
fe2de317
MT
68125@@ -1953,7 +1953,7 @@ int rcu_needs_cpu(int cpu)
68126 for_each_online_cpu(thatcpu) {
68127 if (thatcpu == cpu)
68128 continue;
68129- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
68130+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
68131 thatcpu).dynticks);
68132 smp_mb(); /* Order sampling of snap with end of grace period. */
68133 if ((snap & 0x1) != 0) {
68134diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68135index 3b0c098..43ba2d8 100644
68136--- a/kernel/rcutree_trace.c
68137+++ b/kernel/rcutree_trace.c
68138@@ -74,7 +74,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68139 rdp->qs_pending);
68140 #ifdef CONFIG_NO_HZ
68141 seq_printf(m, " dt=%d/%d/%d df=%lu",
68142- atomic_read(&rdp->dynticks->dynticks),
68143+ atomic_read_unchecked(&rdp->dynticks->dynticks),
68144 rdp->dynticks->dynticks_nesting,
68145 rdp->dynticks->dynticks_nmi_nesting,
68146 rdp->dynticks_fqs);
68147@@ -148,7 +148,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68148 rdp->qs_pending);
68149 #ifdef CONFIG_NO_HZ
68150 seq_printf(m, ",%d,%d,%d,%lu",
68151- atomic_read(&rdp->dynticks->dynticks),
68152+ atomic_read_unchecked(&rdp->dynticks->dynticks),
68153 rdp->dynticks->dynticks_nesting,
68154 rdp->dynticks->dynticks_nmi_nesting,
68155 rdp->dynticks_fqs);
68156diff --git a/kernel/relay.c b/kernel/relay.c
68157index 859ea5a..096e2fe 100644
68158--- a/kernel/relay.c
68159+++ b/kernel/relay.c
68160@@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
66a7e928
MT
68161 };
68162 ssize_t ret;
68163
68164+ pax_track_stack();
68165+
68166 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
68167 return 0;
68168 if (splice_grow_spd(pipe, &spd))
fe2de317
MT
68169diff --git a/kernel/resource.c b/kernel/resource.c
68170index c8dc249..f1e2359 100644
68171--- a/kernel/resource.c
68172+++ b/kernel/resource.c
68173@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
58c5fc13
MT
68174
68175 static int __init ioresources_init(void)
68176 {
68177+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68178+#ifdef CONFIG_GRKERNSEC_PROC_USER
68179+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68180+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68181+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68182+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68183+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68184+#endif
68185+#else
68186 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68187 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68188+#endif
68189 return 0;
68190 }
68191 __initcall(ioresources_init);
fe2de317
MT
68192diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68193index 5c9ccd3..a35e22b 100644
68194--- a/kernel/rtmutex-tester.c
68195+++ b/kernel/rtmutex-tester.c
66a7e928 68196@@ -20,7 +20,7 @@
8308f9c9
MT
68197 #define MAX_RT_TEST_MUTEXES 8
68198
68199 static spinlock_t rttest_lock;
68200-static atomic_t rttest_event;
68201+static atomic_unchecked_t rttest_event;
68202
68203 struct test_thread_data {
68204 int opcode;
fe2de317 68205@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68206
68207 case RTTEST_LOCKCONT:
68208 td->mutexes[td->opdata] = 1;
68209- td->event = atomic_add_return(1, &rttest_event);
68210+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68211 return 0;
68212
68213 case RTTEST_RESET:
fe2de317 68214@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68215 return 0;
68216
68217 case RTTEST_RESETEVENT:
68218- atomic_set(&rttest_event, 0);
68219+ atomic_set_unchecked(&rttest_event, 0);
68220 return 0;
68221
68222 default:
fe2de317 68223@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68224 return ret;
68225
68226 td->mutexes[id] = 1;
68227- td->event = atomic_add_return(1, &rttest_event);
68228+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68229 rt_mutex_lock(&mutexes[id]);
68230- td->event = atomic_add_return(1, &rttest_event);
68231+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68232 td->mutexes[id] = 4;
68233 return 0;
68234
fe2de317 68235@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68236 return ret;
68237
68238 td->mutexes[id] = 1;
68239- td->event = atomic_add_return(1, &rttest_event);
68240+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68241 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68242- td->event = atomic_add_return(1, &rttest_event);
68243+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68244 td->mutexes[id] = ret ? 0 : 4;
68245 return ret ? -EINTR : 0;
68246
fe2de317 68247@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68248 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68249 return ret;
68250
68251- td->event = atomic_add_return(1, &rttest_event);
68252+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68253 rt_mutex_unlock(&mutexes[id]);
68254- td->event = atomic_add_return(1, &rttest_event);
68255+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68256 td->mutexes[id] = 0;
68257 return 0;
68258
fe2de317 68259@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
8308f9c9
MT
68260 break;
68261
68262 td->mutexes[dat] = 2;
68263- td->event = atomic_add_return(1, &rttest_event);
68264+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68265 break;
68266
66a7e928 68267 default:
fe2de317 68268@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
8308f9c9
MT
68269 return;
68270
68271 td->mutexes[dat] = 3;
68272- td->event = atomic_add_return(1, &rttest_event);
68273+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68274 break;
68275
68276 case RTTEST_LOCKNOWAIT:
fe2de317 68277@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
8308f9c9
MT
68278 return;
68279
68280 td->mutexes[dat] = 1;
68281- td->event = atomic_add_return(1, &rttest_event);
68282+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68283 return;
68284
66a7e928 68285 default:
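
/* The rtmutex-tester hunks above switch a test-only event counter to the
 * PaX/grsecurity atomic_unchecked_t type, which opts it out of the reference
 * counter overflow checks that plain atomic_t operations gain elsewhere in
 * this patch. A hedged, simplified sketch of the idea; the real type and its
 * helpers are provided per architecture by the patch, and the GCC builtin
 * below is only a stand-in for illustration: */
typedef struct {
    int counter;
} atomic_unchecked_t;

static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
    /* deliberately no overflow detection: this counter may wrap safely */
    return __sync_add_and_fetch(&v->counter, i);
}
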
fe2de317
MT
68286diff --git a/kernel/sched.c b/kernel/sched.c
68287index b50b0f0..1c6c591 100644
68288--- a/kernel/sched.c
68289+++ b/kernel/sched.c
6e9df6a3 68290@@ -4264,6 +4264,8 @@ static void __sched __schedule(void)
66a7e928
MT
68291 struct rq *rq;
68292 int cpu;
68293
68294+ pax_track_stack();
68295+
68296 need_resched:
68297 preempt_disable();
68298 cpu = smp_processor_id();
fe2de317 68299@@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p, const int nice)
58c5fc13
MT
68300 /* convert nice value [19,-20] to rlimit style value [1,40] */
68301 int nice_rlim = 20 - nice;
68302
68303+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68304+
df50ba0c 68305 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
58c5fc13
MT
68306 capable(CAP_SYS_NICE));
68307 }
6e9df6a3 68308@@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment)
58c5fc13
MT
68309 if (nice > 19)
68310 nice = 19;
68311
68312- if (increment < 0 && !can_nice(current, nice))
68313+ if (increment < 0 && (!can_nice(current, nice) ||
68314+ gr_handle_chroot_nice()))
68315 return -EPERM;
68316
68317 retval = security_task_setnice(current, nice);
6e9df6a3 68318@@ -5127,6 +5132,7 @@ recheck:
6892158b
MT
68319 unsigned long rlim_rtprio =
68320 task_rlimit(p, RLIMIT_RTPRIO);
58c5fc13 68321
6892158b 68322+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
df50ba0c
MT
68323 /* can't set/change the rt policy */
68324 if (policy != p->policy && !rlim_rtprio)
68325 return -EPERM;
fe2de317
MT
68326diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
68327index 429242f..d7cca82 100644
68328--- a/kernel/sched_autogroup.c
68329+++ b/kernel/sched_autogroup.c
68330@@ -7,7 +7,7 @@
68331
68332 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68333 static struct autogroup autogroup_default;
68334-static atomic_t autogroup_seq_nr;
68335+static atomic_unchecked_t autogroup_seq_nr;
68336
68337 static void __init autogroup_init(struct task_struct *init_task)
68338 {
68339@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68340
68341 kref_init(&ag->kref);
68342 init_rwsem(&ag->lock);
68343- ag->id = atomic_inc_return(&autogroup_seq_nr);
68344+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68345 ag->tg = tg;
68346 #ifdef CONFIG_RT_GROUP_SCHED
68347 /*
68348diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
68349index bc8ee99..b6f6492 100644
68350--- a/kernel/sched_fair.c
68351+++ b/kernel/sched_fair.c
68352@@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6892158b
MT
68353 * run_rebalance_domains is triggered when needed from the scheduler tick.
68354 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
df50ba0c
MT
68355 */
68356-static void run_rebalance_domains(struct softirq_action *h)
68357+static void run_rebalance_domains(void)
68358 {
68359 int this_cpu = smp_processor_id();
68360 struct rq *this_rq = cpu_rq(this_cpu);
fe2de317
MT
68361diff --git a/kernel/signal.c b/kernel/signal.c
68362index 291c970..304bd03 100644
68363--- a/kernel/signal.c
68364+++ b/kernel/signal.c
68365@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
df50ba0c
MT
68366
68367 int print_fatal_signals __read_mostly;
68368
68369-static void __user *sig_handler(struct task_struct *t, int sig)
68370+static __sighandler_t sig_handler(struct task_struct *t, int sig)
68371 {
68372 return t->sighand->action[sig - 1].sa.sa_handler;
68373 }
68374
68375-static int sig_handler_ignored(void __user *handler, int sig)
68376+static int sig_handler_ignored(__sighandler_t handler, int sig)
68377 {
68378 /* Is it explicitly or implicitly ignored? */
68379 return handler == SIG_IGN ||
fe2de317 68380@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
df50ba0c
MT
68381 static int sig_task_ignored(struct task_struct *t, int sig,
68382 int from_ancestor_ns)
68383 {
68384- void __user *handler;
68385+ __sighandler_t handler;
68386
68387 handler = sig_handler(t, sig);
68388
fe2de317 68389@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
58c5fc13 68390 atomic_inc(&user->sigpending);
ae4e228f
MT
68391 rcu_read_unlock();
68392
58c5fc13
MT
68393+ if (!override_rlimit)
68394+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
ae4e228f 68395+
58c5fc13
MT
68396 if (override_rlimit ||
68397 atomic_read(&user->sigpending) <=
df50ba0c 68398 task_rlimit(t, RLIMIT_SIGPENDING)) {
fe2de317 68399@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
df50ba0c
MT
68400
68401 int unhandled_signal(struct task_struct *tsk, int sig)
68402 {
68403- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68404+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68405 if (is_global_init(tsk))
68406 return 1;
68407 if (handler != SIG_IGN && handler != SIG_DFL)
fe2de317 68408@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
58c5fc13
MT
68409 }
68410 }
68411
15a11c5b
MT
68412+ /* allow glibc communication via tgkill to other threads in our
68413+ thread group */
68414+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68415+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68416+ && gr_handle_signal(t, sig))
58c5fc13
MT
68417+ return -EPERM;
68418+
68419 return security_task_kill(t, info, sig, 0);
68420 }
68421
fe2de317 68422@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
58c5fc13
MT
68423 return send_signal(sig, info, p, 1);
68424 }
68425
68426-static int
68427+int
68428 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68429 {
68430 return send_signal(sig, info, t, 0);
fe2de317 68431@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
c52201e0
MT
68432 unsigned long int flags;
68433 int ret, blocked, ignored;
68434 struct k_sigaction *action;
68435+ int is_unhandled = 0;
68436
68437 spin_lock_irqsave(&t->sighand->siglock, flags);
68438 action = &t->sighand->action[sig-1];
fe2de317 68439@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
c52201e0
MT
68440 }
68441 if (action->sa.sa_handler == SIG_DFL)
68442 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68443+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68444+ is_unhandled = 1;
58c5fc13
MT
68445 ret = specific_send_sig_info(sig, info, t);
68446 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68447
c52201e0
MT
68448+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
68449+ normal operation */
68450+ if (is_unhandled) {
68451+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68452+ gr_handle_crash(t, sig);
68453+ }
58c5fc13
MT
68454+
68455 return ret;
68456 }
68457
fe2de317 68458@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
57199397
MT
68459 ret = check_kill_permission(sig, info, p);
68460 rcu_read_unlock();
ae4e228f
MT
68461
68462- if (!ret && sig)
68463+ if (!ret && sig) {
68464 ret = do_send_sig_info(sig, info, p, true);
58c5fc13
MT
68465+ if (!ret)
68466+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
ae4e228f 68467+ }
58c5fc13
MT
68468
68469 return ret;
ae4e228f 68470 }
fe2de317 68471@@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
66a7e928
MT
68472 {
68473 siginfo_t info;
68474
68475+ pax_track_stack();
68476+
66a7e928 68477 memset(&info, 0, sizeof info);
6e9df6a3
MT
68478 info.si_signo = signr;
68479 info.si_code = exit_code;
fe2de317 68480@@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
15a11c5b
MT
68481 int error = -ESRCH;
68482
68483 rcu_read_lock();
68484- p = find_task_by_vpid(pid);
68485+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68486+ /* allow glibc communication via tgkill to other threads in our
68487+ thread group */
68488+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68489+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
68490+ p = find_task_by_vpid_unrestricted(pid);
68491+ else
68492+#endif
68493+ p = find_task_by_vpid(pid);
68494 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68495 error = check_kill_permission(sig, info, p);
68496 /*
fe2de317
MT
68497diff --git a/kernel/smp.c b/kernel/smp.c
68498index fb67dfa..f819e2e 100644
68499--- a/kernel/smp.c
68500+++ b/kernel/smp.c
68501@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
ae4e228f
MT
68502 }
68503 EXPORT_SYMBOL(smp_call_function);
68504
68505-void ipi_call_lock(void)
68506+void ipi_call_lock(void) __acquires(call_function.lock)
68507 {
68508 raw_spin_lock(&call_function.lock);
68509 }
68510
68511-void ipi_call_unlock(void)
68512+void ipi_call_unlock(void) __releases(call_function.lock)
68513 {
68514 raw_spin_unlock(&call_function.lock);
68515 }
68516
68517-void ipi_call_lock_irq(void)
68518+void ipi_call_lock_irq(void) __acquires(call_function.lock)
68519 {
68520 raw_spin_lock_irq(&call_function.lock);
68521 }
68522
68523-void ipi_call_unlock_irq(void)
68524+void ipi_call_unlock_irq(void) __releases(call_function.lock)
68525 {
68526 raw_spin_unlock_irq(&call_function.lock);
68527 }
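
/* The kernel/smp.c hunk above only adds sparse lock-context annotations so
 * the static checker can verify that ipi_call_lock()/ipi_call_unlock() pairs
 * balance. For reference, a sketch of how those annotations are defined,
 * modelled on include/linux/compiler.h (they compile away outside sparse): */
#ifdef __CHECKER__
# define __acquires(x) __attribute__((context(x, 0, 1)))
# define __releases(x) __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif
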
fe2de317
MT
68528diff --git a/kernel/softirq.c b/kernel/softirq.c
68529index fca82c3..1db9690 100644
68530--- a/kernel/softirq.c
68531+++ b/kernel/softirq.c
68532@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
ae4e228f 68533
66a7e928 68534 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
ae4e228f
MT
68535
68536-char *softirq_to_name[NR_SOFTIRQS] = {
68537+const char * const softirq_to_name[NR_SOFTIRQS] = {
68538 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
15a11c5b 68539 "TASKLET", "SCHED", "HRTIMER", "RCU"
ae4e228f 68540 };
bc901d79
MT
68541@@ -235,7 +235,7 @@ restart:
68542 kstat_incr_softirqs_this_cpu(vec_nr);
ae4e228f 68543
bc901d79 68544 trace_softirq_entry(vec_nr);
ae4e228f
MT
68545- h->action(h);
68546+ h->action();
bc901d79 68547 trace_softirq_exit(vec_nr);
ae4e228f 68548 if (unlikely(prev_count != preempt_count())) {
bc901d79 68549 printk(KERN_ERR "huh, entered softirq %u %s %p"
15a11c5b 68550@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
ae4e228f
MT
68551 local_irq_restore(flags);
68552 }
68553
68554-void open_softirq(int nr, void (*action)(struct softirq_action *))
68555+void open_softirq(int nr, void (*action)(void))
68556 {
15a11c5b
MT
68557- softirq_vec[nr].action = action;
68558+ pax_open_kernel();
68559+ *(void **)&softirq_vec[nr].action = action;
68560+ pax_close_kernel();
ae4e228f 68561 }
15a11c5b
MT
68562
68563 /*
fe2de317 68564@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
ae4e228f
MT
68565
68566 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68567
68568-static void tasklet_action(struct softirq_action *a)
68569+static void tasklet_action(void)
68570 {
68571 struct tasklet_struct *list;
68572
fe2de317 68573@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
ae4e228f
MT
68574 }
68575 }
68576
68577-static void tasklet_hi_action(struct softirq_action *a)
68578+static void tasklet_hi_action(void)
68579 {
68580 struct tasklet_struct *list;
68581
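
/* In the softirq.c hunks above the handler prototype drops its unused
 * struct softirq_action argument, and open_softirq() stores the handler
 * through pax_open_kernel()/pax_close_kernel(), presumably because the patch
 * places softirq_vec in write-protected memory (that part is not shown in
 * this hunk). A hedged, kernel-context sketch of the resulting registration
 * pattern; the __read_only marking is an assumption for illustration: */
static struct softirq_action {
    void (*action)(void);
} softirq_vec[NR_SOFTIRQS] __read_only;

void open_softirq(int nr, void (*action)(void))
{
    pax_open_kernel();                          /* briefly allow the write   */
    *(void **)&softirq_vec[nr].action = action; /* as in the hunk above      */
    pax_close_kernel();                         /* restore write protection  */
}
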
fe2de317
MT
68582diff --git a/kernel/sys.c b/kernel/sys.c
68583index 1dbbe69..e96e1dd 100644
68584--- a/kernel/sys.c
68585+++ b/kernel/sys.c
68586@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
58c5fc13
MT
68587 error = -EACCES;
68588 goto out;
68589 }
68590+
68591+ if (gr_handle_chroot_setpriority(p, niceval)) {
68592+ error = -EACCES;
68593+ goto out;
68594+ }
68595+
68596 no_nice = security_task_setnice(p, niceval);
68597 if (no_nice) {
68598 error = no_nice;
fe2de317 68599@@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
58c5fc13
MT
68600 goto error;
68601 }
68602
68603+ if (gr_check_group_change(new->gid, new->egid, -1))
68604+ goto error;
68605+
68606 if (rgid != (gid_t) -1 ||
68607 (egid != (gid_t) -1 && egid != old->gid))
68608 new->sgid = new->egid;
6e9df6a3 68609@@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
57199397 68610 old = current_cred();
58c5fc13
MT
68611
68612 retval = -EPERM;
68613+
68614+ if (gr_check_group_change(gid, gid, gid))
68615+ goto error;
68616+
66a7e928 68617 if (nsown_capable(CAP_SETGID))
58c5fc13
MT
68618 new->gid = new->egid = new->sgid = new->fsgid = gid;
68619 else if (gid == old->gid || gid == old->sgid)
fe2de317 68620@@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
58c5fc13
MT
68621 goto error;
68622 }
68623
68624+ if (gr_check_user_change(new->uid, new->euid, -1))
68625+ goto error;
68626+
68627 if (new->uid != old->uid) {
68628 retval = set_user(new);
68629 if (retval < 0)
6e9df6a3 68630@@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
57199397 68631 old = current_cred();
58c5fc13
MT
68632
68633 retval = -EPERM;
68634+
68635+ if (gr_check_crash_uid(uid))
68636+ goto error;
68637+ if (gr_check_user_change(uid, uid, uid))
68638+ goto error;
68639+
66a7e928 68640 if (nsown_capable(CAP_SETUID)) {
58c5fc13
MT
68641 new->suid = new->uid = uid;
68642 if (uid != old->uid) {
fe2de317 68643@@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
58c5fc13
MT
68644 goto error;
68645 }
68646
68647+ if (gr_check_user_change(ruid, euid, -1))
68648+ goto error;
68649+
68650 if (ruid != (uid_t) -1) {
68651 new->uid = ruid;
68652 if (ruid != old->uid) {
fe2de317 68653@@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
58c5fc13
MT
68654 goto error;
68655 }
68656
68657+ if (gr_check_group_change(rgid, egid, -1))
68658+ goto error;
68659+
68660 if (rgid != (gid_t) -1)
68661 new->gid = rgid;
68662 if (egid != (gid_t) -1)
6e9df6a3 68663@@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
57199397
MT
68664 old = current_cred();
68665 old_fsuid = old->fsuid;
58c5fc13
MT
68666
68667+ if (gr_check_user_change(-1, -1, uid))
68668+ goto error;
68669+
68670 if (uid == old->uid || uid == old->euid ||
68671 uid == old->suid || uid == old->fsuid ||
66a7e928 68672 nsown_capable(CAP_SETUID)) {
6e9df6a3 68673@@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
57199397
MT
68674 }
68675 }
68676
68677+error:
68678 abort_creds(new);
68679 return old_fsuid;
68680
6e9df6a3 68681@@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
58c5fc13
MT
68682 if (gid == old->gid || gid == old->egid ||
68683 gid == old->sgid || gid == old->fsgid ||
66a7e928 68684 nsown_capable(CAP_SETGID)) {
58c5fc13
MT
68685+ if (gr_check_group_change(-1, -1, gid))
68686+ goto error;
68687+
68688 if (gid != old_fsgid) {
68689 new->fsgid = gid;
68690 goto change_okay;
57199397
MT
68691 }
68692 }
68693
68694+error:
68695 abort_creds(new);
68696 return old_fsgid;
68697
fe2de317
MT
68698@@ -1188,7 +1224,10 @@ static int override_release(char __user *release, int len)
68699 }
68700 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68701 snprintf(buf, len, "2.6.%u%s", v, rest);
68702- ret = copy_to_user(release, buf, len);
68703+ if (len > sizeof(buf))
68704+ ret = -EFAULT;
68705+ else
68706+ ret = copy_to_user(release, buf, len);
68707 }
68708 return ret;
68709 }
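
/* The override_release() hunk above bounds the copy by the size of the local
 * buffer, so a caller-supplied len larger than that buffer can no longer read
 * past it. A hedged, self-contained userspace sketch of the same bound;
 * copy_release() and the 65-byte buffer are illustrative stand-ins, not
 * kernel interfaces: */
#include <stdio.h>
#include <string.h>

static int copy_release(char *dst, size_t len)
{
    char buf[65];

    snprintf(buf, sizeof(buf), "2.6.%u%s", 40u, "-rc");
    if (len > sizeof(buf))
        return -1;          /* refuse instead of reading past buf */
    memcpy(dst, buf, len);
    return 0;
}

int main(void)
{
    char out[128];

    printf("len=16  -> %d\n", copy_release(out, 16));          /* accepted */
    printf("len=128 -> %d\n", copy_release(out, sizeof(out))); /* rejected */
    return 0;
}
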
68710@@ -1242,19 +1281,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
6e9df6a3
MT
68711 return -EFAULT;
68712
68713 down_read(&uts_sem);
68714- error = __copy_to_user(&name->sysname, &utsname()->sysname,
68715+ error = __copy_to_user(name->sysname, &utsname()->sysname,
68716 __OLD_UTS_LEN);
68717 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68718- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68719+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
68720 __OLD_UTS_LEN);
68721 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68722- error |= __copy_to_user(&name->release, &utsname()->release,
68723+ error |= __copy_to_user(name->release, &utsname()->release,
68724 __OLD_UTS_LEN);
68725 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68726- error |= __copy_to_user(&name->version, &utsname()->version,
68727+ error |= __copy_to_user(name->version, &utsname()->version,
68728 __OLD_UTS_LEN);
68729 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68730- error |= __copy_to_user(&name->machine, &utsname()->machine,
68731+ error |= __copy_to_user(name->machine, &utsname()->machine,
68732 __OLD_UTS_LEN);
68733 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68734 up_read(&uts_sem);
fe2de317 68735@@ -1717,7 +1756,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
58c5fc13
MT
68736 error = get_dumpable(me->mm);
68737 break;
68738 case PR_SET_DUMPABLE:
68739- if (arg2 < 0 || arg2 > 1) {
68740+ if (arg2 > 1) {
68741 error = -EINVAL;
68742 break;
68743 }
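
/* The PR_SET_DUMPABLE hunk above drops the `arg2 < 0` half of the check:
 * arg2 is declared unsigned long in the prctl syscall (see the hunk header),
 * so that comparison can never be true, and `arg2 > 1` already rejects every
 * out-of-range value. A small standalone illustration: */
#include <stdio.h>

int main(void)
{
    unsigned long arg2 = (unsigned long)-1;   /* a "negative" value wraps */

    printf("arg2 < 0 : %d\n", arg2 < (unsigned long)0);  /* always 0      */
    printf("arg2 > 1 : %d\n", arg2 > 1);                 /* 1 -> rejected */
    return 0;
}
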
fe2de317
MT
68744diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68745index 11d65b5..6957b37 100644
68746--- a/kernel/sysctl.c
68747+++ b/kernel/sysctl.c
15a11c5b 68748@@ -85,6 +85,13 @@
ae4e228f 68749
58c5fc13
MT
68750
68751 #if defined(CONFIG_SYSCTL)
68752+#include <linux/grsecurity.h>
68753+#include <linux/grinternal.h>
68754+
68755+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
68756+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
68757+ const int op);
68758+extern int gr_handle_chroot_sysctl(const int op);
68759
68760 /* External variables not in a header file. */
df50ba0c 68761 extern int sysctl_overcommit_memory;
fe2de317 68762@@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
57199397
MT
68763 }
68764
58c5fc13 68765 #endif
57199397 68766+extern struct ctl_table grsecurity_table[];
58c5fc13
MT
68767
68768 static struct ctl_table root_table[];
68769 static struct ctl_table_root sysctl_table_root;
15a11c5b 68770@@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
58c5fc13
MT
68771 int sysctl_legacy_va_layout;
68772 #endif
68773
68774+#ifdef CONFIG_PAX_SOFTMODE
68775+static ctl_table pax_table[] = {
68776+ {
58c5fc13
MT
68777+ .procname = "softmode",
68778+ .data = &pax_softmode,
68779+ .maxlen = sizeof(unsigned int),
68780+ .mode = 0600,
68781+ .proc_handler = &proc_dointvec,
68782+ },
68783+
ae4e228f 68784+ { }
58c5fc13
MT
68785+};
68786+#endif
68787+
df50ba0c 68788 /* The default sysctl tables: */
58c5fc13 68789
df50ba0c 68790 static struct ctl_table root_table[] = {
15a11c5b 68791@@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
58c5fc13
MT
68792 #endif
68793
68794 static struct ctl_table kern_table[] = {
ae4e228f 68795+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58c5fc13 68796+ {
58c5fc13
MT
68797+ .procname = "grsecurity",
68798+ .mode = 0500,
68799+ .child = grsecurity_table,
68800+ },
68801+#endif
68802+
68803+#ifdef CONFIG_PAX_SOFTMODE
68804+ {
58c5fc13
MT
68805+ .procname = "pax",
68806+ .mode = 0500,
68807+ .child = pax_table,
68808+ },
68809+#endif
68810+
58c5fc13 68811 {
ae4e228f
MT
68812 .procname = "sched_child_runs_first",
68813 .data = &sysctl_sched_child_runs_first,
15a11c5b 68814@@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
bc901d79
MT
68815 .data = &modprobe_path,
68816 .maxlen = KMOD_PATH_LEN,
68817 .mode = 0644,
68818- .proc_handler = proc_dostring,
68819+ .proc_handler = proc_dostring_modpriv,
68820 },
68821 {
68822 .procname = "modules_disabled",
15a11c5b 68823@@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
16454cff
MT
68824 .extra1 = &zero,
68825 .extra2 = &one,
68826 },
68827+#endif
68828 {
68829 .procname = "kptr_restrict",
68830 .data = &kptr_restrict,
68831 .maxlen = sizeof(int),
68832 .mode = 0644,
ea610fa8 68833 .proc_handler = proc_dmesg_restrict,
16454cff
MT
68834+#ifdef CONFIG_GRKERNSEC_HIDESYM
68835+ .extra1 = &two,
68836+#else
68837 .extra1 = &zero,
68838+#endif
68839 .extra2 = &two,
68840 },
68841-#endif
68842 {
68843 .procname = "ngroups_max",
68844 .data = &ngroups_max,
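
/* The kern_table hunk above moves the kptr_restrict entry out of the removed
 * #ifdef block and, under GRKERNSEC_HIDESYM, raises its lower bound to 2, so
 * %pK pointers always print as zeros and the setting cannot be relaxed at
 * runtime. A hedged userspace check of the resulting value; path and
 * semantics as documented for the kernel sysctl interface: */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
    int v = -1;

    if (f) {
        if (fscanf(f, "%d", &v) != 1)
            v = -1;
        fclose(f);
    }
    printf("kptr_restrict = %d (2 means %%pK is zeroed for everyone)\n", v);
    return 0;
}
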
15a11c5b 68845@@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
57199397
MT
68846 .proc_handler = proc_dointvec_minmax,
68847 .extra1 = &zero,
68848 },
68849+ {
68850+ .procname = "heap_stack_gap",
68851+ .data = &sysctl_heap_stack_gap,
68852+ .maxlen = sizeof(sysctl_heap_stack_gap),
68853+ .mode = 0644,
68854+ .proc_handler = proc_doulongvec_minmax,
68855+ },
68856 #else
68857 {
68858 .procname = "nr_trim_pages",
6e9df6a3 68859@@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
66a7e928
MT
68860 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
68861 {
58c5fc13 68862 int mode;
66a7e928
MT
68863+ int error;
68864+
58c5fc13
MT
68865+ if (table->parent != NULL && table->parent->procname != NULL &&
68866+ table->procname != NULL &&
68867+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
68868+ return -EACCES;
68869+ if (gr_handle_chroot_sysctl(op))
68870+ return -EACCES;
68871+ error = gr_handle_sysctl(table, op);
68872+ if (error)
68873+ return error;
66a7e928
MT
68874
68875 if (root->permissions)
68876 mode = root->permissions(root, current->nsproxy, table);
fe2de317 68877@@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *table, int write,
bc901d79
MT
68878 buffer, lenp, ppos);
68879 }
68880
68881+int proc_dostring_modpriv(struct ctl_table *table, int write,
68882+ void __user *buffer, size_t *lenp, loff_t *ppos)
68883+{
68884+ if (write && !capable(CAP_SYS_MODULE))
68885+ return -EPERM;
68886+
68887+ return _proc_do_string(table->data, table->maxlen, write,
68888+ buffer, lenp, ppos);
68889+}
68890+
68891 static size_t proc_skip_spaces(char **buf)
68892 {
68893 size_t ret;
fe2de317 68894@@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
57199397
MT
68895 len = strlen(tmp);
68896 if (len > *size)
68897 len = *size;
68898+ if (len > sizeof(tmp))
68899+ len = sizeof(tmp);
68900 if (copy_to_user(*buf, tmp, len))
68901 return -EFAULT;
68902 *size -= len;
fe2de317 68903@@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
6892158b
MT
68904 *i = val;
68905 } else {
68906 val = convdiv * (*i) / convmul;
68907- if (!first)
68908+ if (!first) {
68909 err = proc_put_char(&buffer, &left, '\t');
68910+ if (err)
68911+ break;
68912+ }
68913 err = proc_put_long(&buffer, &left, val, false);
68914 if (err)
68915 break;
fe2de317 68916@@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *table, int write,
bc901d79
MT
68917 return -ENOSYS;
68918 }
68919
68920+int proc_dostring_modpriv(struct ctl_table *table, int write,
68921+ void __user *buffer, size_t *lenp, loff_t *ppos)
68922+{
68923+ return -ENOSYS;
68924+}
68925+
68926 int proc_dointvec(struct ctl_table *table, int write,
68927 void __user *buffer, size_t *lenp, loff_t *ppos)
68928 {
6e9df6a3 68929@@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
bc901d79
MT
68930 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68931 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68932 EXPORT_SYMBOL(proc_dostring);
68933+EXPORT_SYMBOL(proc_dostring_modpriv);
68934 EXPORT_SYMBOL(proc_doulongvec_minmax);
68935 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68936 EXPORT_SYMBOL(register_sysctl_table);
fe2de317
MT
68937diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68938index e8bffbe..2344401 100644
68939--- a/kernel/sysctl_binary.c
68940+++ b/kernel/sysctl_binary.c
68941@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68942 int i;
68943
68944 set_fs(KERNEL_DS);
68945- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68946+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68947 set_fs(old_fs);
68948 if (result < 0)
68949 goto out_kfree;
68950@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68951 }
68952
68953 set_fs(KERNEL_DS);
68954- result = vfs_write(file, buffer, str - buffer, &pos);
68955+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68956 set_fs(old_fs);
68957 if (result < 0)
68958 goto out_kfree;
68959@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68960 int i;
68961
68962 set_fs(KERNEL_DS);
68963- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68964+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68965 set_fs(old_fs);
68966 if (result < 0)
68967 goto out_kfree;
68968@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68969 }
68970
68971 set_fs(KERNEL_DS);
68972- result = vfs_write(file, buffer, str - buffer, &pos);
68973+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68974 set_fs(old_fs);
68975 if (result < 0)
68976 goto out_kfree;
68977@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68978 int i;
68979
68980 set_fs(KERNEL_DS);
68981- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68982+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68983 set_fs(old_fs);
68984 if (result < 0)
68985 goto out;
68986@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68987 __le16 dnaddr;
68988
68989 set_fs(KERNEL_DS);
68990- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68991+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68992 set_fs(old_fs);
68993 if (result < 0)
68994 goto out;
68995@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68996 le16_to_cpu(dnaddr) & 0x3ff);
68997
68998 set_fs(KERNEL_DS);
68999- result = vfs_write(file, buf, len, &pos);
69000+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69001 set_fs(old_fs);
69002 if (result < 0)
69003 goto out;
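
/* The sysctl_binary.c hunks above only add __force_user casts: these
 * vfs_read()/vfs_write() calls run under set_fs(KERNEL_DS) with kernel
 * buffers, and the patch's stricter sparse address-space checking would
 * otherwise flag a kernel pointer being passed where a __user pointer is
 * expected. A sketch of the annotations involved, modelled on
 * include/linux/compiler.h; treating __force_user as shorthand for
 * "__force __user" is an assumption about the patch's helper macro: */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user
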
69004diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
69005index 362da65..ab8ef8c 100644
69006--- a/kernel/sysctl_check.c
69007+++ b/kernel/sysctl_check.c
69008@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
bc901d79
MT
69009 set_fail(&fail, table, "Directory with extra2");
69010 } else {
69011 if ((table->proc_handler == proc_dostring) ||
69012+ (table->proc_handler == proc_dostring_modpriv) ||
69013 (table->proc_handler == proc_dointvec) ||
69014 (table->proc_handler == proc_dointvec_minmax) ||
69015 (table->proc_handler == proc_dointvec_jiffies) ||
fe2de317
MT
69016diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69017index e660464..c8b9e67 100644
69018--- a/kernel/taskstats.c
69019+++ b/kernel/taskstats.c
df50ba0c 69020@@ -27,9 +27,12 @@
58c5fc13
MT
69021 #include <linux/cgroup.h>
69022 #include <linux/fs.h>
69023 #include <linux/file.h>
69024+#include <linux/grsecurity.h>
69025 #include <net/genetlink.h>
6e9df6a3 69026 #include <linux/atomic.h>
58c5fc13
MT
69027
69028+extern int gr_is_taskstats_denied(int pid);
69029+
69030 /*
69031 * Maximum length of a cpumask that can be specified in
69032 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
6e9df6a3 69033@@ -556,6 +559,9 @@ err:
58c5fc13 69034
bc901d79
MT
69035 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69036 {
58c5fc13
MT
69037+ if (gr_is_taskstats_denied(current->pid))
69038+ return -EACCES;
69039+
bc901d79
MT
69040 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69041 return cmd_attr_register_cpumask(info);
69042 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
fe2de317
MT
69043diff --git a/kernel/time.c b/kernel/time.c
69044index d776062..fa8d186 100644
69045--- a/kernel/time.c
69046+++ b/kernel/time.c
69047@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69048 return error;
69049
69050 if (tz) {
69051+ /* we log in do_settimeofday called below, so don't log twice
69052+ */
69053+ if (!tv)
69054+ gr_log_timechange();
69055+
69056 /* SMP safe, global irq locking makes it work. */
69057 sys_tz = *tz;
69058 update_vsyscall_tz();
69059diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69060index ea5e1a9..8b8df07 100644
69061--- a/kernel/time/alarmtimer.c
69062+++ b/kernel/time/alarmtimer.c
6e9df6a3 69063@@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
15a11c5b
MT
69064 {
69065 int error = 0;
69066 int i;
69067- struct k_clock alarm_clock = {
69068+ static struct k_clock alarm_clock = {
69069 .clock_getres = alarm_clock_getres,
69070 .clock_get = alarm_clock_get,
69071 .timer_create = alarm_timer_create,
fe2de317
MT
69072diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69073index 7a90d02..6d8585a 100644
69074--- a/kernel/time/tick-broadcast.c
69075+++ b/kernel/time/tick-broadcast.c
69076@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
58c5fc13
MT
69077 * then clear the broadcast bit.
69078 */
69079 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69080- int cpu = smp_processor_id();
69081+ cpu = smp_processor_id();
69082
69083 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69084 tick_broadcast_clear_oneshot(cpu);
fe2de317
MT
69085diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69086index 6f9798b..f8c4087 100644
69087--- a/kernel/time/timekeeping.c
69088+++ b/kernel/time/timekeeping.c
bc901d79
MT
69089@@ -14,6 +14,7 @@
69090 #include <linux/init.h>
69091 #include <linux/mm.h>
69092 #include <linux/sched.h>
69093+#include <linux/grsecurity.h>
66a7e928 69094 #include <linux/syscore_ops.h>
bc901d79
MT
69095 #include <linux/clocksource.h>
69096 #include <linux/jiffies.h>
fe2de317 69097@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
bc901d79
MT
69098 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69099 return -EINVAL;
69100
69101+ gr_log_timechange();
69102+
69103 write_seqlock_irqsave(&xtime_lock, flags);
69104
69105 timekeeping_forward_now();
fe2de317
MT
69106diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69107index 3258455..f35227d 100644
69108--- a/kernel/time/timer_list.c
69109+++ b/kernel/time/timer_list.c
69110@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
57199397
MT
69111
69112 static void print_name_offset(struct seq_file *m, void *sym)
69113 {
69114+#ifdef CONFIG_GRKERNSEC_HIDESYM
69115+ SEQ_printf(m, "<%p>", NULL);
69116+#else
69117 char symname[KSYM_NAME_LEN];
69118
69119 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
16454cff 69120 SEQ_printf(m, "<%pK>", sym);
57199397
MT
69121 else
69122 SEQ_printf(m, "%s", symname);
69123+#endif
69124 }
69125
69126 static void
69127@@ -112,7 +116,11 @@ next_one:
69128 static void
69129 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69130 {
69131+#ifdef CONFIG_GRKERNSEC_HIDESYM
69132+ SEQ_printf(m, " .base: %p\n", NULL);
69133+#else
16454cff 69134 SEQ_printf(m, " .base: %pK\n", base);
57199397
MT
69135+#endif
69136 SEQ_printf(m, " .index: %d\n",
69137 base->index);
69138 SEQ_printf(m, " .resolution: %Lu nsecs\n",
fe2de317 69139@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
57199397
MT
69140 {
69141 struct proc_dir_entry *pe;
69142
69143+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69144+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69145+#else
69146 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69147+#endif
69148 if (!pe)
69149 return -ENOMEM;
69150 return 0;
fe2de317
MT
69151diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69152index a5d0a3a..60c7948 100644
69153--- a/kernel/time/timer_stats.c
69154+++ b/kernel/time/timer_stats.c
8308f9c9
MT
69155@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69156 static unsigned long nr_entries;
69157 static struct entry entries[MAX_ENTRIES];
69158
69159-static atomic_t overflow_count;
69160+static atomic_unchecked_t overflow_count;
69161
69162 /*
69163 * The entries are in a hash-table, for fast lookup:
69164@@ -140,7 +140,7 @@ static void reset_entries(void)
69165 nr_entries = 0;
69166 memset(entries, 0, sizeof(entries));
69167 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69168- atomic_set(&overflow_count, 0);
69169+ atomic_set_unchecked(&overflow_count, 0);
69170 }
69171
69172 static struct entry *alloc_entry(void)
fe2de317 69173@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
8308f9c9
MT
69174 if (likely(entry))
69175 entry->count++;
69176 else
69177- atomic_inc(&overflow_count);
69178+ atomic_inc_unchecked(&overflow_count);
69179
69180 out_unlock:
69181 raw_spin_unlock_irqrestore(lock, flags);
fe2de317 69182@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
57199397
MT
69183
69184 static void print_name_offset(struct seq_file *m, unsigned long addr)
69185 {
69186+#ifdef CONFIG_GRKERNSEC_HIDESYM
69187+ seq_printf(m, "<%p>", NULL);
69188+#else
69189 char symname[KSYM_NAME_LEN];
69190
69191 if (lookup_symbol_name(addr, symname) < 0)
69192 seq_printf(m, "<%p>", (void *)addr);
69193 else
69194 seq_printf(m, "%s", symname);
69195+#endif
69196 }
69197
69198 static int tstats_show(struct seq_file *m, void *v)
fe2de317 69199@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
8308f9c9
MT
69200
69201 seq_puts(m, "Timer Stats Version: v0.2\n");
69202 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69203- if (atomic_read(&overflow_count))
69204+ if (atomic_read_unchecked(&overflow_count))
69205 seq_printf(m, "Overflow: %d entries\n",
69206- atomic_read(&overflow_count));
69207+ atomic_read_unchecked(&overflow_count));
69208
69209 for (i = 0; i < nr_entries; i++) {
69210 entry = entries + i;
fe2de317 69211@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
57199397
MT
69212 {
69213 struct proc_dir_entry *pe;
69214
69215+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69216+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69217+#else
69218 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69219+#endif
69220 if (!pe)
69221 return -ENOMEM;
69222 return 0;
fe2de317
MT
69223diff --git a/kernel/timer.c b/kernel/timer.c
69224index 8cff361..0fb5cd8 100644
69225--- a/kernel/timer.c
69226+++ b/kernel/timer.c
15a11c5b 69227@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
ae4e228f
MT
69228 /*
69229 * This function runs timers and the timer-tq in bottom half context.
69230 */
69231-static void run_timer_softirq(struct softirq_action *h)
69232+static void run_timer_softirq(void)
69233 {
16454cff 69234 struct tvec_base *base = __this_cpu_read(tvec_bases);
58c5fc13 69235
fe2de317
MT
69236diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69237index 7c910a5..8b72104 100644
69238--- a/kernel/trace/blktrace.c
69239+++ b/kernel/trace/blktrace.c
69240@@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
8308f9c9
MT
69241 struct blk_trace *bt = filp->private_data;
69242 char buf[16];
69243
69244- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69245+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69246
69247 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69248 }
fe2de317 69249@@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
8308f9c9
MT
69250 return 1;
69251
69252 bt = buf->chan->private_data;
69253- atomic_inc(&bt->dropped);
69254+ atomic_inc_unchecked(&bt->dropped);
69255 return 0;
69256 }
69257
fe2de317 69258@@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
8308f9c9
MT
69259
69260 bt->dir = dir;
69261 bt->dev = dev;
69262- atomic_set(&bt->dropped, 0);
69263+ atomic_set_unchecked(&bt->dropped, 0);
69264
69265 ret = -EIO;
69266 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
fe2de317
MT
69267diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69268index 48d3762..3b61fce 100644
69269--- a/kernel/trace/ftrace.c
69270+++ b/kernel/trace/ftrace.c
69271@@ -1584,12 +1584,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
15a11c5b
MT
69272 if (unlikely(ftrace_disabled))
69273 return 0;
ae4e228f
MT
69274
69275+ ret = ftrace_arch_code_modify_prepare();
69276+ FTRACE_WARN_ON(ret);
69277+ if (ret)
69278+ return 0;
69279+
69280 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69281+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69282 if (ret) {
69283 ftrace_bug(ret, ip);
ae4e228f
MT
69284- return 0;
69285 }
69286- return 1;
69287+ return ret ? 0 : 1;
58c5fc13
MT
69288 }
69289
ae4e228f 69290 /*
fe2de317 69291@@ -2606,7 +2611,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
66a7e928
MT
69292
69293 int
69294 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69295- void *data)
69296+ void *data)
69297 {
69298 struct ftrace_func_probe *entry;
69299 struct ftrace_page *pg;
fe2de317
MT
69300diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69301index 17a2d44..85907e2 100644
69302--- a/kernel/trace/trace.c
69303+++ b/kernel/trace/trace.c
69304@@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
66a7e928
MT
69305 size_t rem;
69306 unsigned int i;
69307
69308+ pax_track_stack();
69309+
69310 if (splice_grow_spd(pipe, &spd))
69311 return -ENOMEM;
69312
fe2de317 69313@@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
66a7e928
MT
69314 int entries, size, i;
69315 size_t ret;
69316
69317+ pax_track_stack();
69318+
69319 if (splice_grow_spd(pipe, &spd))
69320 return -ENOMEM;
69321
fe2de317 69322@@ -4093,10 +4097,9 @@ static const struct file_operations tracing_dyn_info_fops = {
ae4e228f
MT
69323 };
69324 #endif
58c5fc13 69325
ae4e228f
MT
69326-static struct dentry *d_tracer;
69327-
69328 struct dentry *tracing_init_dentry(void)
69329 {
69330+ static struct dentry *d_tracer;
69331 static int once;
69332
69333 if (d_tracer)
6e9df6a3 69334@@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
ae4e228f 69335 return d_tracer;
58c5fc13
MT
69336 }
69337
ae4e228f
MT
69338-static struct dentry *d_percpu;
69339-
69340 struct dentry *tracing_dentry_percpu(void)
69341 {
69342+ static struct dentry *d_percpu;
69343 static int once;
69344 struct dentry *d_tracer;
69345
fe2de317
MT
69346diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69347index c212a7f..7b02394 100644
69348--- a/kernel/trace/trace_events.c
69349+++ b/kernel/trace/trace_events.c
69350@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
bc901d79
MT
69351 struct ftrace_module_file_ops {
69352 struct list_head list;
69353 struct module *mod;
16454cff
MT
69354- struct file_operations id;
69355- struct file_operations enable;
69356- struct file_operations format;
69357- struct file_operations filter;
16454cff
MT
69358 };
69359
69360 static struct ftrace_module_file_ops *
fe2de317 69361@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
15a11c5b
MT
69362
69363 file_ops->mod = mod;
66a7e928 69364
15a11c5b
MT
69365- file_ops->id = ftrace_event_id_fops;
69366- file_ops->id.owner = mod;
69367-
69368- file_ops->enable = ftrace_enable_fops;
69369- file_ops->enable.owner = mod;
69370-
69371- file_ops->filter = ftrace_event_filter_fops;
69372- file_ops->filter.owner = mod;
69373-
69374- file_ops->format = ftrace_event_format_fops;
69375- file_ops->format.owner = mod;
69376+ pax_open_kernel();
69377+ *(void **)&mod->trace_id.owner = mod;
69378+ *(void **)&mod->trace_enable.owner = mod;
69379+ *(void **)&mod->trace_filter.owner = mod;
69380+ *(void **)&mod->trace_format.owner = mod;
69381+ pax_close_kernel();
69382
69383 list_add(&file_ops->list, &ftrace_module_file_list);
69384
fe2de317 69385@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
15a11c5b
MT
69386
69387 for_each_event(call, start, end) {
69388 __trace_add_event_call(*call, mod,
69389- &file_ops->id, &file_ops->enable,
69390- &file_ops->filter, &file_ops->format);
69391+ &mod->trace_id, &mod->trace_enable,
69392+ &mod->trace_filter, &mod->trace_format);
69393 }
69394 }
69395
fe2de317
MT
69396diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69397index 00d527c..7c5b1a3 100644
69398--- a/kernel/trace/trace_kprobe.c
69399+++ b/kernel/trace/trace_kprobe.c
69400@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
6e9df6a3
MT
69401 long ret;
69402 int maxlen = get_rloc_len(*(u32 *)dest);
69403 u8 *dst = get_rloc_data(dest);
69404- u8 *src = addr;
69405+ const u8 __user *src = (const u8 __force_user *)addr;
69406 mm_segment_t old_fs = get_fs();
69407 if (!maxlen)
69408 return;
fe2de317 69409@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
6e9df6a3
MT
69410 pagefault_disable();
69411 do
69412 ret = __copy_from_user_inatomic(dst++, src++, 1);
69413- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69414+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69415 dst[-1] = '\0';
69416 pagefault_enable();
69417 set_fs(old_fs);
fe2de317 69418@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
6e9df6a3
MT
69419 ((u8 *)get_rloc_data(dest))[0] = '\0';
69420 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69421 } else
69422- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69423+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69424 get_rloc_offs(*(u32 *)dest));
69425 }
69426 /* Return the length of string -- including null terminal byte */
fe2de317 69427@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
6e9df6a3
MT
69428 set_fs(KERNEL_DS);
69429 pagefault_disable();
69430 do {
69431- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69432+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69433 len++;
69434 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69435 pagefault_enable();
fe2de317
MT
69436diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69437index fd3c8aa..5f324a6 100644
69438--- a/kernel/trace/trace_mmiotrace.c
69439+++ b/kernel/trace/trace_mmiotrace.c
8308f9c9
MT
69440@@ -24,7 +24,7 @@ struct header_iter {
69441 static struct trace_array *mmio_trace_array;
69442 static bool overrun_detected;
69443 static unsigned long prev_overruns;
69444-static atomic_t dropped_count;
69445+static atomic_unchecked_t dropped_count;
69446
69447 static void mmio_reset_data(struct trace_array *tr)
69448 {
fe2de317 69449@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
8308f9c9
MT
69450
69451 static unsigned long count_overruns(struct trace_iterator *iter)
69452 {
69453- unsigned long cnt = atomic_xchg(&dropped_count, 0);
69454+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69455 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69456
69457 if (over > prev_overruns)
fe2de317 69458@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
8308f9c9
MT
69459 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69460 sizeof(*entry), 0, pc);
69461 if (!event) {
69462- atomic_inc(&dropped_count);
69463+ atomic_inc_unchecked(&dropped_count);
69464 return;
69465 }
69466 entry = ring_buffer_event_data(event);
fe2de317 69467@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
8308f9c9
MT
69468 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69469 sizeof(*entry), 0, pc);
69470 if (!event) {
69471- atomic_inc(&dropped_count);
69472+ atomic_inc_unchecked(&dropped_count);
69473 return;
69474 }
69475 entry = ring_buffer_event_data(event);
fe2de317
MT
69476diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69477index 5199930..26c73a0 100644
69478--- a/kernel/trace/trace_output.c
69479+++ b/kernel/trace/trace_output.c
69480@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
ae4e228f 69481
58c5fc13
MT
69482 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69483 if (!IS_ERR(p)) {
69484- p = mangle_path(s->buffer + s->len, p, "\n");
69485+ p = mangle_path(s->buffer + s->len, p, "\n\\");
69486 if (p) {
69487 s->len = p - s->buffer;
69488 return 1;
fe2de317
MT
69489diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69490index 77575b3..6e623d1 100644
69491--- a/kernel/trace/trace_stack.c
69492+++ b/kernel/trace/trace_stack.c
ae4e228f
MT
69493@@ -50,7 +50,7 @@ static inline void check_stack(void)
69494 return;
58c5fc13 69495
ae4e228f
MT
69496 /* we do not handle interrupt stacks yet */
69497- if (!object_is_on_stack(&this_size))
69498+ if (!object_starts_on_stack(&this_size))
69499 return;
69500
69501 local_irq_save(flags);
fe2de317
MT
69502diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69503index 209b379..7f76423 100644
69504--- a/kernel/trace/trace_workqueue.c
69505+++ b/kernel/trace/trace_workqueue.c
71d190be
MT
69506@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69507 int cpu;
69508 pid_t pid;
69509 /* Can be inserted from interrupt or user context, need to be atomic */
69510- atomic_t inserted;
69511+ atomic_unchecked_t inserted;
69512 /*
69513 * Don't need to be atomic, works are serialized in a single workqueue thread
69514 * on a single CPU.
69515@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69516 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69517 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69518 if (node->pid == wq_thread->pid) {
69519- atomic_inc(&node->inserted);
69520+ atomic_inc_unchecked(&node->inserted);
69521 goto found;
69522 }
69523 }
fe2de317 69524@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
71d190be
MT
69525 tsk = get_pid_task(pid, PIDTYPE_PID);
69526 if (tsk) {
69527 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69528- atomic_read(&cws->inserted), cws->executed,
69529+ atomic_read_unchecked(&cws->inserted), cws->executed,
69530 tsk->comm);
69531 put_task_struct(tsk);
69532 }
fe2de317
MT
69533diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69534index c0cb9c4..f33aa89 100644
69535--- a/lib/Kconfig.debug
69536+++ b/lib/Kconfig.debug
69537@@ -1091,6 +1091,7 @@ config LATENCYTOP
69538 depends on DEBUG_KERNEL
69539 depends on STACKTRACE_SUPPORT
69540 depends on PROC_FS
69541+ depends on !GRKERNSEC_HIDESYM
69542 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
69543 select KALLSYMS
69544 select KALLSYMS_ALL
69545diff --git a/lib/bitmap.c b/lib/bitmap.c
69546index 2f4412e..a557e27 100644
69547--- a/lib/bitmap.c
69548+++ b/lib/bitmap.c
69549@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
6e9df6a3
MT
69550 {
69551 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69552 u32 chunk;
69553- const char __user *ubuf = buf;
69554+ const char __user *ubuf = (const char __force_user *)buf;
69555
69556 bitmap_zero(maskp, nmaskbits);
69557
fe2de317 69558@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
6e9df6a3
MT
69559 {
69560 if (!access_ok(VERIFY_READ, ubuf, ulen))
69561 return -EFAULT;
69562- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
69563+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
69564 }
69565 EXPORT_SYMBOL(bitmap_parse_user);
69566
fe2de317 69567@@ -594,7 +594,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
6e9df6a3
MT
69568 {
69569 unsigned a, b;
69570 int c, old_c, totaldigits;
69571- const char __user *ubuf = buf;
69572+ const char __user *ubuf = (const char __force_user *)buf;
69573 int exp_digit, in_range;
69574
69575 totaldigits = c = 0;
fe2de317 69576@@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __user *ubuf,
6e9df6a3
MT
69577 {
69578 if (!access_ok(VERIFY_READ, ubuf, ulen))
69579 return -EFAULT;
69580- return __bitmap_parselist((const char *)ubuf,
69581+ return __bitmap_parselist((const char __force_kernel *)ubuf,
69582 ulen, 1, maskp, nmaskbits);
69583 }
69584 EXPORT_SYMBOL(bitmap_parselist_user);
fe2de317
MT
69585diff --git a/lib/bug.c b/lib/bug.c
69586index 1955209..cbbb2ad 100644
69587--- a/lib/bug.c
69588+++ b/lib/bug.c
69589@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
ae4e228f
MT
69590 return BUG_TRAP_TYPE_NONE;
69591
69592 bug = find_bug(bugaddr);
69593+ if (!bug)
69594+ return BUG_TRAP_TYPE_NONE;
69595
6892158b
MT
69596 file = NULL;
69597 line = 0;
fe2de317
MT
69598diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69599index a78b7c6..2c73084 100644
69600--- a/lib/debugobjects.c
69601+++ b/lib/debugobjects.c
69602@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
ae4e228f
MT
69603 if (limit > 4)
69604 return;
69605
69606- is_on_stack = object_is_on_stack(addr);
69607+ is_on_stack = object_starts_on_stack(addr);
69608 if (is_on_stack == onstack)
69609 return;
69610
fe2de317
MT
69611diff --git a/lib/devres.c b/lib/devres.c
69612index 7c0e953..f642b5c 100644
69613--- a/lib/devres.c
69614+++ b/lib/devres.c
6e9df6a3
MT
69615@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69616 void devm_iounmap(struct device *dev, void __iomem *addr)
69617 {
69618 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69619- (void *)addr));
69620+ (void __force *)addr));
69621 iounmap(addr);
69622 }
69623 EXPORT_SYMBOL(devm_iounmap);
fe2de317 69624@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
6e9df6a3
MT
69625 {
69626 ioport_unmap(addr);
69627 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69628- devm_ioport_map_match, (void *)addr));
69629+ devm_ioport_map_match, (void __force *)addr));
69630 }
69631 EXPORT_SYMBOL(devm_ioport_unmap);
69632
fe2de317
MT
69633diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69634index db07bfd..719b5ab 100644
69635--- a/lib/dma-debug.c
69636+++ b/lib/dma-debug.c
15a11c5b 69637@@ -870,7 +870,7 @@ out:
58c5fc13 69638
ae4e228f
MT
69639 static void check_for_stack(struct device *dev, void *addr)
69640 {
69641- if (object_is_on_stack(addr))
69642+ if (object_starts_on_stack(addr))
69643 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69644 "stack [addr=%p]\n", addr);
69645 }
fe2de317
MT
69646diff --git a/lib/extable.c b/lib/extable.c
69647index 4cac81e..63e9b8f 100644
69648--- a/lib/extable.c
69649+++ b/lib/extable.c
15a11c5b
MT
69650@@ -13,6 +13,7 @@
69651 #include <linux/init.h>
69652 #include <linux/sort.h>
69653 #include <asm/uaccess.h>
69654+#include <asm/pgtable.h>
69655
69656 #ifndef ARCH_HAS_SORT_EXTABLE
69657 /*
fe2de317 69658@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
15a11c5b
MT
69659 void sort_extable(struct exception_table_entry *start,
69660 struct exception_table_entry *finish)
69661 {
69662+ pax_open_kernel();
69663 sort(start, finish - start, sizeof(struct exception_table_entry),
69664 cmp_ex, NULL);
69665+ pax_close_kernel();
69666 }
69667
69668 #ifdef CONFIG_MODULES
fe2de317
MT
69669diff --git a/lib/inflate.c b/lib/inflate.c
69670index 013a761..c28f3fc 100644
69671--- a/lib/inflate.c
69672+++ b/lib/inflate.c
6892158b 69673@@ -269,7 +269,7 @@ static void free(void *where)
58c5fc13
MT
69674 malloc_ptr = free_mem_ptr;
69675 }
69676 #else
69677-#define malloc(a) kmalloc(a, GFP_KERNEL)
69678+#define malloc(a) kmalloc((a), GFP_KERNEL)
69679 #define free(a) kfree(a)
69680 #endif
69681
fe2de317
MT
69682diff --git a/lib/kref.c b/lib/kref.c
69683index 3efb882..8492f4c 100644
69684--- a/lib/kref.c
69685+++ b/lib/kref.c
16454cff 69686@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
6892158b
MT
69687 */
69688 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
69689 {
69690- WARN_ON(release == NULL);
69691+ BUG_ON(release == NULL);
69692 WARN_ON(release == (void (*)(struct kref *))kfree);
69693
69694 if (atomic_dec_and_test(&kref->refcount)) {
fe2de317
MT
69695diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69696index a2f9da5..3bcadb6 100644
69697--- a/lib/radix-tree.c
69698+++ b/lib/radix-tree.c
df50ba0c 69699@@ -80,7 +80,7 @@ struct radix_tree_preload {
58c5fc13
MT
69700 int nr;
69701 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69702 };
69703-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69704+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69705
bc901d79 69706 static inline void *ptr_to_indirect(void *ptr)
58c5fc13 69707 {
fe2de317
MT
69708diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69709index d7222a9..2172edc 100644
69710--- a/lib/vsprintf.c
69711+++ b/lib/vsprintf.c
bc901d79
MT
69712@@ -16,6 +16,9 @@
69713 * - scnprintf and vscnprintf
69714 */
69715
69716+#ifdef CONFIG_GRKERNSEC_HIDESYM
69717+#define __INCLUDED_BY_HIDESYM 1
69718+#endif
69719 #include <stdarg.h>
69720 #include <linux/module.h>
69721 #include <linux/types.h>
fe2de317 69722@@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
bc901d79 69723 char sym[KSYM_SYMBOL_LEN];
66a7e928
MT
69724 if (ext == 'B')
69725 sprint_backtrace(sym, value);
69726- else if (ext != 'f' && ext != 's')
69727+ else if (ext != 'f' && ext != 's' && ext != 'a')
bc901d79
MT
69728 sprint_symbol(sym, value);
69729 else
69730 kallsyms_lookup(value, NULL, NULL, NULL, sym);
fe2de317 69731@@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
16454cff
MT
69732 return string(buf, end, uuid, spec);
69733 }
69734
69735+#ifdef CONFIG_GRKERNSEC_HIDESYM
66a7e928 69736+int kptr_restrict __read_mostly = 2;
16454cff 69737+#else
66a7e928 69738 int kptr_restrict __read_mostly;
16454cff
MT
69739+#endif
69740
69741 /*
69742 * Show a '%p' thing. A kernel extension is that the '%p' is followed
6e9df6a3 69743@@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
bc901d79
MT
69744 * - 'S' For symbolic direct pointers with offset
69745 * - 's' For symbolic direct pointers without offset
66a7e928 69746 * - 'B' For backtraced symbolic direct pointers with offset
bc901d79
MT
69747+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69748+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69749 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69750 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69751 * - 'M' For a 6-byte MAC address, it prints the address in the
fe2de317 69752@@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
bc901d79 69753 {
66a7e928 69754 if (!ptr && *fmt != 'K') {
bc901d79
MT
69755 /*
69756- * Print (null) with the same width as a pointer so it makes
69757+ * Print (nil) with the same width as a pointer so it makes
69758 * tabular output look nice.
69759 */
69760 if (spec.field_width == -1)
69761 spec.field_width = 2 * sizeof(void *);
6892158b
MT
69762- return string(buf, end, "(null)", spec);
69763+ return string(buf, end, "(nil)", spec);
bc901d79 69764 }
6892158b
MT
69765
69766 switch (*fmt) {
fe2de317 69767@@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
bc901d79
MT
69768 /* Fallthrough */
69769 case 'S':
69770 case 's':
69771+#ifdef CONFIG_GRKERNSEC_HIDESYM
69772+ break;
69773+#else
69774+ return symbol_string(buf, end, ptr, spec, *fmt);
69775+#endif
69776+ case 'A':
69777+ case 'a':
66a7e928 69778 case 'B':
bc901d79
MT
69779 return symbol_string(buf, end, ptr, spec, *fmt);
69780 case 'R':
fe2de317 69781@@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
bc901d79
MT
69782 typeof(type) value; \
69783 if (sizeof(type) == 8) { \
69784 args = PTR_ALIGN(args, sizeof(u32)); \
69785- *(u32 *)&value = *(u32 *)args; \
69786- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69787+ *(u32 *)&value = *(const u32 *)args; \
69788+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69789 } else { \
69790 args = PTR_ALIGN(args, sizeof(type)); \
69791- value = *(typeof(type) *)args; \
69792+ value = *(const typeof(type) *)args; \
69793 } \
69794 args += sizeof(type); \
69795 value; \
fe2de317 69796@@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69797 case FORMAT_TYPE_STR: {
69798 const char *str_arg = args;
69799 args += strlen(str_arg) + 1;
69800- str = string(str, end, (char *)str_arg, spec);
69801+ str = string(str, end, str_arg, spec);
69802 break;
69803 }
69804
69805diff --git a/localversion-grsec b/localversion-grsec
69806new file mode 100644
69807index 0000000..7cd6065
69808--- /dev/null
69809+++ b/localversion-grsec
69810@@ -0,0 +1 @@
69811+-grsec
69812diff --git a/mm/Kconfig b/mm/Kconfig
69813index f2f1ca1..0645f06 100644
69814--- a/mm/Kconfig
69815+++ b/mm/Kconfig
69816@@ -238,10 +238,10 @@ config KSM
69817 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
15a11c5b 69818
69819 config DEFAULT_MMAP_MIN_ADDR
69820- int "Low address space to protect from user allocation"
69821+ int "Low address space to protect from user allocation"
69822 depends on MMU
69823- default 4096
69824- help
69825+ default 65536
69826+ help
69827 This is the portion of low virtual memory which should be protected
69828 from userspace allocation. Keeping a user from writing to low pages
69829 can help reduce the impact of kernel NULL pointer bugs.
69830diff --git a/mm/filemap.c b/mm/filemap.c
69831index 7771871..91bcdb4 100644
69832--- a/mm/filemap.c
69833+++ b/mm/filemap.c
69834@@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69835 struct address_space *mapping = file->f_mapping;
69836
69837 if (!mapping->a_ops->readpage)
69838- return -ENOEXEC;
69839+ return -ENODEV;
69840 file_accessed(file);
69841 vma->vm_ops = &generic_file_vm_ops;
69842 vma->vm_flags |= VM_CAN_NONLINEAR;
fe2de317 69843@@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69844 *pos = i_size_read(inode);
69845
69846 if (limit != RLIM_INFINITY) {
69847+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69848 if (*pos >= limit) {
69849 send_sig(SIGXFSZ, current, 0);
69850 return -EFBIG;
69851diff --git a/mm/fremap.c b/mm/fremap.c
69852index b8e0e2d..076e171 100644
69853--- a/mm/fremap.c
69854+++ b/mm/fremap.c
69855@@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69856 retry:
69857 vma = find_vma(mm, start);
69858
69859+#ifdef CONFIG_PAX_SEGMEXEC
69860+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69861+ goto out;
69862+#endif
69863+
69864 /*
69865 * Make sure the vma is shared, that it supports prefaulting,
69866 * and that the remapped range is valid and fully within
69867diff --git a/mm/highmem.c b/mm/highmem.c
69868index 5ef672c..d7660f4 100644
69869--- a/mm/highmem.c
69870+++ b/mm/highmem.c
bc901d79 69871@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69872 * So no dangers, even with speculative execution.
69873 */
69874 page = pte_page(pkmap_page_table[i]);
ae4e228f 69875+ pax_open_kernel();
69876 pte_clear(&init_mm, (unsigned long)page_address(page),
69877 &pkmap_page_table[i]);
69878-
69879+ pax_close_kernel();
69880 set_page_address(page, NULL);
69881 need_flush = 1;
69882 }
bc901d79 69883@@ -186,9 +187,11 @@ start:
69884 }
69885 }
69886 vaddr = PKMAP_ADDR(last_pkmap_nr);
69887+
69888+ pax_open_kernel();
69889 set_pte_at(&init_mm, vaddr,
69890 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69891-
69892+ pax_close_kernel();
69893 pkmap_count[last_pkmap_nr] = 1;
69894 set_page_address(page, (void *)vaddr);
58c5fc13 69895
69896diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69897index d819d93..468e18f 100644
69898--- a/mm/huge_memory.c
69899+++ b/mm/huge_memory.c
69900@@ -702,7 +702,7 @@ out:
69901 * run pte_offset_map on the pmd, if an huge pmd could
69902 * materialize from under us from a different thread.
69903 */
69904- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69905+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69906 return VM_FAULT_OOM;
69907 /* if an huge pmd materialized from under us just retry later */
69908 if (unlikely(pmd_trans_huge(*pmd)))
69909@@ -829,7 +829,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
69910
69911 for (i = 0; i < HPAGE_PMD_NR; i++) {
69912 copy_user_highpage(pages[i], page + i,
69913- haddr + PAGE_SHIFT*i, vma);
69914+ haddr + PAGE_SIZE*i, vma);
69915 __SetPageUptodate(pages[i]);
69916 cond_resched();
69917 }
69918diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69919index bb28a5f..fef0140 100644
69920--- a/mm/hugetlb.c
69921+++ b/mm/hugetlb.c
69922@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
69923 __SetPageHead(page);
69924 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
69925 __SetPageTail(p);
69926+ set_page_count(p, 0);
69927 p->first_page = page;
69928 }
69929 }
69930@@ -2346,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69931 return 1;
69932 }
69933
69934+#ifdef CONFIG_PAX_SEGMEXEC
69935+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69936+{
69937+ struct mm_struct *mm = vma->vm_mm;
69938+ struct vm_area_struct *vma_m;
69939+ unsigned long address_m;
69940+ pte_t *ptep_m;
69941+
69942+ vma_m = pax_find_mirror_vma(vma);
69943+ if (!vma_m)
69944+ return;
69945+
69946+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69947+ address_m = address + SEGMEXEC_TASK_SIZE;
69948+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69949+ get_page(page_m);
6892158b 69950+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
69951+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69952+}
69953+#endif
69954+
69955 /*
69956 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69957 */
fe2de317 69958@@ -2449,6 +2471,11 @@ retry_avoidcopy:
58c5fc13 69959 make_huge_pte(vma, new_page, 1));
69960 page_remove_rmap(old_page);
69961 hugepage_add_new_anon_rmap(new_page, vma, address);
69962+
69963+#ifdef CONFIG_PAX_SEGMEXEC
69964+ pax_mirror_huge_pte(vma, address, new_page);
69965+#endif
69966+
69967 /* Make the old page be freed below */
69968 new_page = old_page;
6892158b 69969 mmu_notifier_invalidate_range_end(mm,
fe2de317 69970@@ -2600,6 +2627,10 @@ retry:
69971 && (vma->vm_flags & VM_SHARED)));
69972 set_huge_pte_at(mm, address, ptep, new_pte);
69973
69974+#ifdef CONFIG_PAX_SEGMEXEC
69975+ pax_mirror_huge_pte(vma, address, page);
69976+#endif
69977+
69978 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69979 /* Optimization, do the COW without a second fault */
69980 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
fe2de317 69981@@ -2629,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69982 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69983 struct hstate *h = hstate_vma(vma);
69984
69985+#ifdef CONFIG_PAX_SEGMEXEC
69986+ struct vm_area_struct *vma_m;
6892158b 69987+#endif
58c5fc13 69988+
69989 ptep = huge_pte_offset(mm, address);
69990 if (ptep) {
69991 entry = huge_ptep_get(ptep);
fe2de317 69992@@ -2640,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
bc901d79 69993 VM_FAULT_SET_HINDEX(h - hstates);
69994 }
69995
69996+#ifdef CONFIG_PAX_SEGMEXEC
69997+ vma_m = pax_find_mirror_vma(vma);
69998+ if (vma_m) {
69999+ unsigned long address_m;
70000+
70001+ if (vma->vm_start > vma_m->vm_start) {
70002+ address_m = address;
70003+ address -= SEGMEXEC_TASK_SIZE;
70004+ vma = vma_m;
70005+ h = hstate_vma(vma);
70006+ } else
70007+ address_m = address + SEGMEXEC_TASK_SIZE;
70008+
70009+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70010+ return VM_FAULT_OOM;
70011+ address_m &= HPAGE_MASK;
70012+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70013+ }
70014+#endif
70015+
70016 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70017 if (!ptep)
70018 return VM_FAULT_OOM;
70019diff --git a/mm/internal.h b/mm/internal.h
70020index 2189af4..f2ca332 100644
70021--- a/mm/internal.h
70022+++ b/mm/internal.h
70023@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70024 * in mm/page_alloc.c
70025 */
70026 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70027+extern void free_compound_page(struct page *page);
70028 extern void prep_compound_page(struct page *page, unsigned long order);
70029 #ifdef CONFIG_MEMORY_FAILURE
70030 extern bool is_free_buddy_page(struct page *page);
70031diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70032index d6880f5..ed77913 100644
70033--- a/mm/kmemleak.c
70034+++ b/mm/kmemleak.c
70035@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
70036
70037 for (i = 0; i < object->trace_len; i++) {
70038 void *ptr = (void *)object->trace[i];
70039- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70040+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70041 }
70042 }
70043
70044diff --git a/mm/maccess.c b/mm/maccess.c
70045index 4cee182..e00511d 100644
70046--- a/mm/maccess.c
70047+++ b/mm/maccess.c
70048@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70049 set_fs(KERNEL_DS);
70050 pagefault_disable();
70051 ret = __copy_from_user_inatomic(dst,
70052- (__force const void __user *)src, size);
70053+ (const void __force_user *)src, size);
70054 pagefault_enable();
70055 set_fs(old_fs);
70056
fe2de317 70057@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70058
70059 set_fs(KERNEL_DS);
70060 pagefault_disable();
70061- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70062+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70063 pagefault_enable();
70064 set_fs(old_fs);
70065
70066diff --git a/mm/madvise.c b/mm/madvise.c
70067index 74bf193..feb6fd3 100644
70068--- a/mm/madvise.c
70069+++ b/mm/madvise.c
70070@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
58c5fc13 70071 pgoff_t pgoff;
ae4e228f 70072 unsigned long new_flags = vma->vm_flags;
70073
70074+#ifdef CONFIG_PAX_SEGMEXEC
70075+ struct vm_area_struct *vma_m;
70076+#endif
70077+
70078 switch (behavior) {
70079 case MADV_NORMAL:
70080 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
16454cff 70081@@ -110,6 +114,13 @@ success:
70082 /*
70083 * vm_flags is protected by the mmap_sem held in write mode.
70084 */
70085+
70086+#ifdef CONFIG_PAX_SEGMEXEC
70087+ vma_m = pax_find_mirror_vma(vma);
70088+ if (vma_m)
70089+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70090+#endif
70091+
70092 vma->vm_flags = new_flags;
70093
70094 out:
fe2de317 70095@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70096 struct vm_area_struct ** prev,
70097 unsigned long start, unsigned long end)
70098 {
70099+
70100+#ifdef CONFIG_PAX_SEGMEXEC
70101+ struct vm_area_struct *vma_m;
70102+#endif
58c5fc13 70103+
70104 *prev = vma;
70105 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70106 return -EINVAL;
fe2de317 70107@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70108 zap_page_range(vma, start, end - start, &details);
70109 } else
70110 zap_page_range(vma, start, end - start, NULL);
70111+
70112+#ifdef CONFIG_PAX_SEGMEXEC
70113+ vma_m = pax_find_mirror_vma(vma);
70114+ if (vma_m) {
70115+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70116+ struct zap_details details = {
70117+ .nonlinear_vma = vma_m,
70118+ .last_index = ULONG_MAX,
70119+ };
70120+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70121+ } else
70122+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70123+ }
70124+#endif
70125+
70126 return 0;
70127 }
58c5fc13 70128
fe2de317 70129@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70130 if (end < start)
70131 goto out;
70132
70133+#ifdef CONFIG_PAX_SEGMEXEC
70134+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70135+ if (end > SEGMEXEC_TASK_SIZE)
70136+ goto out;
70137+ } else
70138+#endif
70139+
70140+ if (end > TASK_SIZE)
70141+ goto out;
70142+
70143 error = 0;
70144 if (end == start)
70145 goto out;
70146diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70147index 2b43ba0..fc09657 100644
70148--- a/mm/memory-failure.c
70149+++ b/mm/memory-failure.c
70150@@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70151
70152 int sysctl_memory_failure_recovery __read_mostly = 1;
70153
70154-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70155+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70156
70157 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70158
70159@@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
70160 si.si_signo = SIGBUS;
70161 si.si_errno = 0;
70162 si.si_code = BUS_MCEERR_AO;
70163- si.si_addr = (void *)addr;
70164+ si.si_addr = (void __user *)addr;
70165 #ifdef __ARCH_SI_TRAPNO
70166 si.si_trapno = trapno;
70167 #endif
70168@@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70169 }
70170
70171 nr_pages = 1 << compound_trans_order(hpage);
70172- atomic_long_add(nr_pages, &mce_bad_pages);
70173+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70174
70175 /*
70176 * We need/can do nothing about count=0 pages.
70177@@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70178 if (!PageHWPoison(hpage)
70179 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70180 || (p != hpage && TestSetPageHWPoison(hpage))) {
70181- atomic_long_sub(nr_pages, &mce_bad_pages);
70182+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70183 return 0;
70184 }
70185 set_page_hwpoison_huge_page(hpage);
70186@@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70187 }
70188 if (hwpoison_filter(p)) {
70189 if (TestClearPageHWPoison(p))
70190- atomic_long_sub(nr_pages, &mce_bad_pages);
70191+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70192 unlock_page(hpage);
70193 put_page(hpage);
70194 return 0;
70195@@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
70196 return 0;
70197 }
70198 if (TestClearPageHWPoison(p))
70199- atomic_long_sub(nr_pages, &mce_bad_pages);
70200+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70201 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70202 return 0;
70203 }
70204@@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
70205 */
70206 if (TestClearPageHWPoison(page)) {
70207 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70208- atomic_long_sub(nr_pages, &mce_bad_pages);
70209+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70210 freeit = 1;
70211 if (PageHuge(page))
70212 clear_page_hwpoison_huge_page(page);
70213@@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70214 }
70215 done:
70216 if (!PageHWPoison(hpage))
70217- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70218+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70219 set_page_hwpoison_huge_page(hpage);
70220 dequeue_hwpoisoned_huge_page(hpage);
70221 /* keep elevated page count for bad page */
70222@@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page, int flags)
70223 return ret;
70224
70225 done:
70226- atomic_long_add(1, &mce_bad_pages);
70227+ atomic_long_add_unchecked(1, &mce_bad_pages);
70228 SetPageHWPoison(page);
70229 /* keep elevated page count for bad page */
70230 return ret;
70231diff --git a/mm/memory.c b/mm/memory.c
70232index b2b8731..6080174 100644
70233--- a/mm/memory.c
70234+++ b/mm/memory.c
70235@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70236 return;
70237
70238 pmd = pmd_offset(pud, start);
70239+
70240+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70241 pud_clear(pud);
70242 pmd_free_tlb(tlb, pmd, start);
70243+#endif
70244+
70245 }
70246
70247 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
fe2de317 70248@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
6892158b 70249 if (end - 1 > ceiling - 1)
70250 return;
70251
df50ba0c 70252+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
6892158b 70253 pud = pud_offset(pgd, start);
70254 pgd_clear(pgd);
70255 pud_free_tlb(tlb, pud, start);
70256+#endif
70257+
70258 }
70259
70260 /*
6e9df6a3 70261@@ -1566,12 +1573,6 @@ no_page_table:
70262 return page;
70263 }
70264
70265-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70266-{
70267- return stack_guard_page_start(vma, addr) ||
70268- stack_guard_page_end(vma, addr+PAGE_SIZE);
70269-}
70270-
70271 /**
70272 * __get_user_pages() - pin user pages in memory
70273 * @tsk: task_struct of target task
fe2de317 70274@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
ae4e228f 70275 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70276 i = 0;
70277
70278- do {
70279+ while (nr_pages) {
70280 struct vm_area_struct *vma;
70281
70282- vma = find_extend_vma(mm, start);
70283+ vma = find_vma(mm, start);
66a7e928 70284 if (!vma && in_gate_area(mm, start)) {
58c5fc13 70285 unsigned long pg = start & PAGE_MASK;
71d190be 70286 pgd_t *pgd;
fe2de317 70287@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71d190be 70288 goto next_page;
70289 }
70290
70291- if (!vma ||
70292+ if (!vma || start < vma->vm_start ||
70293 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
ae4e228f 70294 !(vm_flags & vma->vm_flags))
58c5fc13 70295 return i ? : -EFAULT;
fe2de317 70296@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70297 int ret;
70298 unsigned int fault_flags = 0;
70299
70300- /* For mlock, just skip the stack guard page. */
70301- if (foll_flags & FOLL_MLOCK) {
70302- if (stack_guard_page(vma, start))
70303- goto next_page;
70304- }
70305 if (foll_flags & FOLL_WRITE)
70306 fault_flags |= FAULT_FLAG_WRITE;
70307 if (nonblocking)
6e9df6a3 70308@@ -1800,7 +1796,7 @@ next_page:
70309 start += PAGE_SIZE;
70310 nr_pages--;
70311 } while (nr_pages && start < vma->vm_end);
70312- } while (nr_pages);
70313+ }
70314 return i;
70315 }
66a7e928 70316 EXPORT_SYMBOL(__get_user_pages);
fe2de317 70317@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70318 page_add_file_rmap(page);
70319 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70320
70321+#ifdef CONFIG_PAX_SEGMEXEC
70322+ pax_mirror_file_pte(vma, addr, page, ptl);
70323+#endif
70324+
70325 retval = 0;
70326 pte_unmap_unlock(pte, ptl);
70327 return retval;
6e9df6a3 70328@@ -2041,10 +2041,22 @@ out:
70329 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70330 struct page *page)
70331 {
70332+
70333+#ifdef CONFIG_PAX_SEGMEXEC
70334+ struct vm_area_struct *vma_m;
70335+#endif
70336+
70337 if (addr < vma->vm_start || addr >= vma->vm_end)
70338 return -EFAULT;
70339 if (!page_count(page))
70340 return -EINVAL;
70341+
70342+#ifdef CONFIG_PAX_SEGMEXEC
70343+ vma_m = pax_find_mirror_vma(vma);
70344+ if (vma_m)
70345+ vma_m->vm_flags |= VM_INSERTPAGE;
70346+#endif
70347+
70348 vma->vm_flags |= VM_INSERTPAGE;
70349 return insert_page(vma, addr, page, vma->vm_page_prot);
70350 }
fe2de317 70351@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70352 unsigned long pfn)
70353 {
70354 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70355+ BUG_ON(vma->vm_mirror);
70356
70357 if (addr < vma->vm_start || addr >= vma->vm_end)
70358 return -EFAULT;
fe2de317 70359@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70360 copy_user_highpage(dst, src, va, vma);
70361 }
70362
70363+#ifdef CONFIG_PAX_SEGMEXEC
70364+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70365+{
70366+ struct mm_struct *mm = vma->vm_mm;
70367+ spinlock_t *ptl;
70368+ pte_t *pte, entry;
70369+
70370+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70371+ entry = *pte;
70372+ if (!pte_present(entry)) {
70373+ if (!pte_none(entry)) {
70374+ BUG_ON(pte_file(entry));
70375+ free_swap_and_cache(pte_to_swp_entry(entry));
70376+ pte_clear_not_present_full(mm, address, pte, 0);
70377+ }
70378+ } else {
70379+ struct page *page;
70380+
70381+ flush_cache_page(vma, address, pte_pfn(entry));
70382+ entry = ptep_clear_flush(vma, address, pte);
70383+ BUG_ON(pte_dirty(entry));
70384+ page = vm_normal_page(vma, address, entry);
70385+ if (page) {
70386+ update_hiwater_rss(mm);
70387+ if (PageAnon(page))
df50ba0c 70388+ dec_mm_counter_fast(mm, MM_ANONPAGES);
58c5fc13 70389+ else
df50ba0c 70390+ dec_mm_counter_fast(mm, MM_FILEPAGES);
70391+ page_remove_rmap(page);
70392+ page_cache_release(page);
70393+ }
70394+ }
70395+ pte_unmap_unlock(pte, ptl);
70396+}
70397+
70398+/* PaX: if vma is mirrored, synchronize the mirror's PTE
70399+ *
70400+ * the ptl of the lower mapped page is held on entry and is not released on exit
70401+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70402+ */
70403+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70404+{
70405+ struct mm_struct *mm = vma->vm_mm;
70406+ unsigned long address_m;
70407+ spinlock_t *ptl_m;
70408+ struct vm_area_struct *vma_m;
70409+ pmd_t *pmd_m;
70410+ pte_t *pte_m, entry_m;
70411+
70412+ BUG_ON(!page_m || !PageAnon(page_m));
70413+
70414+ vma_m = pax_find_mirror_vma(vma);
70415+ if (!vma_m)
70416+ return;
70417+
70418+ BUG_ON(!PageLocked(page_m));
70419+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70420+ address_m = address + SEGMEXEC_TASK_SIZE;
70421+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
bc901d79 70422+ pte_m = pte_offset_map(pmd_m, address_m);
70423+ ptl_m = pte_lockptr(mm, pmd_m);
70424+ if (ptl != ptl_m) {
70425+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70426+ if (!pte_none(*pte_m))
70427+ goto out;
70428+ }
70429+
70430+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70431+ page_cache_get(page_m);
70432+ page_add_anon_rmap(page_m, vma_m, address_m);
df50ba0c 70433+ inc_mm_counter_fast(mm, MM_ANONPAGES);
70434+ set_pte_at(mm, address_m, pte_m, entry_m);
70435+ update_mmu_cache(vma_m, address_m, entry_m);
70436+out:
70437+ if (ptl != ptl_m)
70438+ spin_unlock(ptl_m);
bc901d79 70439+ pte_unmap(pte_m);
70440+ unlock_page(page_m);
70441+}
70442+
70443+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70444+{
70445+ struct mm_struct *mm = vma->vm_mm;
70446+ unsigned long address_m;
70447+ spinlock_t *ptl_m;
70448+ struct vm_area_struct *vma_m;
70449+ pmd_t *pmd_m;
70450+ pte_t *pte_m, entry_m;
70451+
70452+ BUG_ON(!page_m || PageAnon(page_m));
70453+
70454+ vma_m = pax_find_mirror_vma(vma);
70455+ if (!vma_m)
70456+ return;
70457+
70458+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70459+ address_m = address + SEGMEXEC_TASK_SIZE;
70460+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
bc901d79 70461+ pte_m = pte_offset_map(pmd_m, address_m);
70462+ ptl_m = pte_lockptr(mm, pmd_m);
70463+ if (ptl != ptl_m) {
70464+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70465+ if (!pte_none(*pte_m))
70466+ goto out;
70467+ }
70468+
70469+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70470+ page_cache_get(page_m);
70471+ page_add_file_rmap(page_m);
df50ba0c 70472+ inc_mm_counter_fast(mm, MM_FILEPAGES);
70473+ set_pte_at(mm, address_m, pte_m, entry_m);
70474+ update_mmu_cache(vma_m, address_m, entry_m);
70475+out:
70476+ if (ptl != ptl_m)
70477+ spin_unlock(ptl_m);
bc901d79 70478+ pte_unmap(pte_m);
70479+}
70480+
70481+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70482+{
70483+ struct mm_struct *mm = vma->vm_mm;
70484+ unsigned long address_m;
70485+ spinlock_t *ptl_m;
70486+ struct vm_area_struct *vma_m;
70487+ pmd_t *pmd_m;
70488+ pte_t *pte_m, entry_m;
70489+
70490+ vma_m = pax_find_mirror_vma(vma);
70491+ if (!vma_m)
70492+ return;
70493+
70494+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70495+ address_m = address + SEGMEXEC_TASK_SIZE;
70496+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
bc901d79 70497+ pte_m = pte_offset_map(pmd_m, address_m);
70498+ ptl_m = pte_lockptr(mm, pmd_m);
70499+ if (ptl != ptl_m) {
70500+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70501+ if (!pte_none(*pte_m))
70502+ goto out;
70503+ }
70504+
70505+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70506+ set_pte_at(mm, address_m, pte_m, entry_m);
70507+out:
70508+ if (ptl != ptl_m)
70509+ spin_unlock(ptl_m);
bc901d79 70510+ pte_unmap(pte_m);
70511+}
70512+
70513+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70514+{
70515+ struct page *page_m;
70516+ pte_t entry;
70517+
70518+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70519+ goto out;
70520+
70521+ entry = *pte;
70522+ page_m = vm_normal_page(vma, address, entry);
70523+ if (!page_m)
70524+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70525+ else if (PageAnon(page_m)) {
70526+ if (pax_find_mirror_vma(vma)) {
70527+ pte_unmap_unlock(pte, ptl);
70528+ lock_page(page_m);
70529+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70530+ if (pte_same(entry, *pte))
70531+ pax_mirror_anon_pte(vma, address, page_m, ptl);
70532+ else
70533+ unlock_page(page_m);
70534+ }
70535+ } else
70536+ pax_mirror_file_pte(vma, address, page_m, ptl);
70537+
70538+out:
70539+ pte_unmap_unlock(pte, ptl);
70540+}
70541+#endif
70542+
70543 /*
70544 * This routine handles present pages, when users try to write
70545 * to a shared page. It is done by copying the page to a new address
6e9df6a3 70546@@ -2656,6 +2849,12 @@ gotten:
70547 */
70548 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70549 if (likely(pte_same(*page_table, orig_pte))) {
70550+
70551+#ifdef CONFIG_PAX_SEGMEXEC
70552+ if (pax_find_mirror_vma(vma))
70553+ BUG_ON(!trylock_page(new_page));
70554+#endif
70555+
70556 if (old_page) {
70557 if (!PageAnon(old_page)) {
df50ba0c 70558 dec_mm_counter_fast(mm, MM_FILEPAGES);
6e9df6a3 70559@@ -2707,6 +2906,10 @@ gotten:
70560 page_remove_rmap(old_page);
70561 }
70562
70563+#ifdef CONFIG_PAX_SEGMEXEC
70564+ pax_mirror_anon_pte(vma, address, new_page, ptl);
70565+#endif
70566+
70567 /* Free the old page.. */
70568 new_page = old_page;
70569 ret |= VM_FAULT_WRITE;
fe2de317 70570@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70571 swap_free(entry);
70572 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70573 try_to_free_swap(page);
70574+
70575+#ifdef CONFIG_PAX_SEGMEXEC
70576+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70577+#endif
70578+
70579 unlock_page(page);
70580 if (swapcache) {
70581 /*
fe2de317 70582@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70583
70584 /* No need to invalidate - it was non-present before */
df50ba0c 70585 update_mmu_cache(vma, address, page_table);
70586+
70587+#ifdef CONFIG_PAX_SEGMEXEC
70588+ pax_mirror_anon_pte(vma, address, page, ptl);
70589+#endif
70590+
70591 unlock:
70592 pte_unmap_unlock(page_table, ptl);
70593 out:
6e9df6a3 70594@@ -3028,40 +3241,6 @@ out_release:
70595 }
70596
70597 /*
70598- * This is like a special single-page "expand_{down|up}wards()",
70599- * except we must first make sure that 'address{-|+}PAGE_SIZE'
57199397 70600- * doesn't hit another vma.
70601- */
70602-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70603-{
70604- address &= PAGE_MASK;
70605- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70606- struct vm_area_struct *prev = vma->vm_prev;
70607-
70608- /*
70609- * Is there a mapping abutting this one below?
70610- *
70611- * That's only ok if it's the same stack mapping
70612- * that has gotten split..
70613- */
70614- if (prev && prev->vm_end == address)
70615- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70616-
15a11c5b 70617- expand_downwards(vma, address - PAGE_SIZE);
57199397 70618- }
70619- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70620- struct vm_area_struct *next = vma->vm_next;
70621-
70622- /* As VM_GROWSDOWN but s/below/above/ */
70623- if (next && next->vm_start == address + PAGE_SIZE)
70624- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70625-
70626- expand_upwards(vma, address + PAGE_SIZE);
70627- }
70628- return 0;
70629-}
70630-
70631-/*
70632 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70633 * but allow concurrent faults), and pte mapped but not yet locked.
70634 * We return with mmap_sem still held, but pte unmapped and unlocked.
fe2de317 70635@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70636 unsigned long address, pte_t *page_table, pmd_t *pmd,
70637 unsigned int flags)
70638 {
70639- struct page *page;
70640+ struct page *page = NULL;
70641 spinlock_t *ptl;
70642 pte_t entry;
70643
70644- pte_unmap(page_table);
70645-
70646- /* Check if we need to add a guard page to the stack */
70647- if (check_stack_guard_page(vma, address) < 0)
70648- return VM_FAULT_SIGBUS;
70649-
70650- /* Use the zero-page for reads */
70651 if (!(flags & FAULT_FLAG_WRITE)) {
70652 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70653 vma->vm_page_prot));
70654- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70655+ ptl = pte_lockptr(mm, pmd);
70656+ spin_lock(ptl);
70657 if (!pte_none(*page_table))
70658 goto unlock;
70659 goto setpte;
70660 }
70661
70662 /* Allocate our own private page. */
70663+ pte_unmap(page_table);
70664+
70665 if (unlikely(anon_vma_prepare(vma)))
70666 goto oom;
70667 page = alloc_zeroed_user_highpage_movable(vma, address);
fe2de317 70668@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70669 if (!pte_none(*page_table))
70670 goto release;
ae4e228f 70671
70672+#ifdef CONFIG_PAX_SEGMEXEC
70673+ if (pax_find_mirror_vma(vma))
70674+ BUG_ON(!trylock_page(page));
70675+#endif
70676+
df50ba0c 70677 inc_mm_counter_fast(mm, MM_ANONPAGES);
58c5fc13 70678 page_add_new_anon_rmap(page, vma, address);
ae4e228f 70679 setpte:
6e9df6a3 70680@@ -3116,6 +3296,12 @@ setpte:
58c5fc13
MT
70681
70682 /* No need to invalidate - it was non-present before */
df50ba0c 70683 update_mmu_cache(vma, address, page_table);
70684+
70685+#ifdef CONFIG_PAX_SEGMEXEC
70686+ if (page)
70687+ pax_mirror_anon_pte(vma, address, page, ptl);
70688+#endif
70689+
70690 unlock:
70691 pte_unmap_unlock(page_table, ptl);
70692 return 0;
fe2de317 70693@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70694 */
70695 /* Only go through if we didn't race with anybody else... */
70696 if (likely(pte_same(*page_table, orig_pte))) {
70697+
70698+#ifdef CONFIG_PAX_SEGMEXEC
70699+ if (anon && pax_find_mirror_vma(vma))
70700+ BUG_ON(!trylock_page(page));
70701+#endif
70702+
70703 flush_icache_page(vma, page);
70704 entry = mk_pte(page, vma->vm_page_prot);
70705 if (flags & FAULT_FLAG_WRITE)
fe2de317 70706@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70707
70708 /* no need to invalidate: a not-present page won't be cached */
df50ba0c 70709 update_mmu_cache(vma, address, page_table);
70710+
70711+#ifdef CONFIG_PAX_SEGMEXEC
70712+ if (anon)
70713+ pax_mirror_anon_pte(vma, address, page, ptl);
70714+ else
70715+ pax_mirror_file_pte(vma, address, page, ptl);
70716+#endif
70717+
70718 } else {
6e9df6a3
MT
70719 if (cow_page)
70720 mem_cgroup_uncharge_page(cow_page);
fe2de317 70721@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
58c5fc13 70722 if (flags & FAULT_FLAG_WRITE)
bc901d79 70723 flush_tlb_fix_spurious_fault(vma, address);
70724 }
70725+
70726+#ifdef CONFIG_PAX_SEGMEXEC
70727+ pax_mirror_pte(vma, address, pte, pmd, ptl);
70728+ return 0;
70729+#endif
70730+
70731 unlock:
70732 pte_unmap_unlock(pte, ptl);
70733 return 0;
fe2de317 70734@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70735 pmd_t *pmd;
70736 pte_t *pte;
70737
70738+#ifdef CONFIG_PAX_SEGMEXEC
70739+ struct vm_area_struct *vma_m;
70740+#endif
70741+
70742 __set_current_state(TASK_RUNNING);
70743
70744 count_vm_event(PGFAULT);
fe2de317 70745@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70746 if (unlikely(is_vm_hugetlb_page(vma)))
70747 return hugetlb_fault(mm, vma, address, flags);
70748
70749+#ifdef CONFIG_PAX_SEGMEXEC
70750+ vma_m = pax_find_mirror_vma(vma);
70751+ if (vma_m) {
70752+ unsigned long address_m;
70753+ pgd_t *pgd_m;
70754+ pud_t *pud_m;
70755+ pmd_t *pmd_m;
70756+
70757+ if (vma->vm_start > vma_m->vm_start) {
70758+ address_m = address;
70759+ address -= SEGMEXEC_TASK_SIZE;
70760+ vma = vma_m;
70761+ } else
70762+ address_m = address + SEGMEXEC_TASK_SIZE;
70763+
70764+ pgd_m = pgd_offset(mm, address_m);
70765+ pud_m = pud_alloc(mm, pgd_m, address_m);
70766+ if (!pud_m)
70767+ return VM_FAULT_OOM;
70768+ pmd_m = pmd_alloc(mm, pud_m, address_m);
70769+ if (!pmd_m)
70770+ return VM_FAULT_OOM;
16454cff 70771+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70772+ return VM_FAULT_OOM;
70773+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70774+ }
70775+#endif
70776+
70777 pgd = pgd_offset(mm, address);
70778 pud = pud_alloc(mm, pgd, address);
70779 if (!pud)
fe2de317 70780@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70781 * run pte_offset_map on the pmd, if an huge pmd could
70782 * materialize from under us from a different thread.
70783 */
70784- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70785+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70786 return VM_FAULT_OOM;
70787 /* if an huge pmd materialized from under us just retry later */
70788 if (unlikely(pmd_trans_huge(*pmd)))
6e9df6a3 70789@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
70790 gate_vma.vm_start = FIXADDR_USER_START;
70791 gate_vma.vm_end = FIXADDR_USER_END;
70792 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70793- gate_vma.vm_page_prot = __P101;
70794+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70795 /*
70796 * Make sure the vDSO gets into every core dump.
70797 * Dumping its contents makes post-mortem fully interpretable later
70798diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70799index 9c51f9f..a9416cf 100644
70800--- a/mm/mempolicy.c
70801+++ b/mm/mempolicy.c
70802@@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70803 unsigned long vmstart;
70804 unsigned long vmend;
70805
70806+#ifdef CONFIG_PAX_SEGMEXEC
70807+ struct vm_area_struct *vma_m;
70808+#endif
70809+
70810 vma = find_vma_prev(mm, start, &prev);
70811 if (!vma || vma->vm_start > start)
70812 return -EFAULT;
fe2de317 70813@@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
df50ba0c 70814 err = policy_vma(vma, new_pol);
58c5fc13 70815 if (err)
df50ba0c 70816 goto out;
70817+
70818+#ifdef CONFIG_PAX_SEGMEXEC
70819+ vma_m = pax_find_mirror_vma(vma);
70820+ if (vma_m) {
df50ba0c 70821+ err = policy_vma(vma_m, new_pol);
58c5fc13 70822+ if (err)
df50ba0c 70823+ goto out;
70824+ }
70825+#endif
70826+
70827 }
70828
70829 out:
fe2de317 70830@@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70831
70832 if (end < start)
70833 return -EINVAL;
70834+
70835+#ifdef CONFIG_PAX_SEGMEXEC
70836+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70837+ if (end > SEGMEXEC_TASK_SIZE)
70838+ return -EINVAL;
70839+ } else
70840+#endif
70841+
70842+ if (end > TASK_SIZE)
70843+ return -EINVAL;
70844+
70845 if (end == start)
70846 return 0;
70847
fe2de317 70848@@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
58c5fc13 70849 if (!mm)
6892158b 70850 goto out;
70851
70852+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70853+ if (mm != current->mm &&
70854+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70855+ err = -EPERM;
70856+ goto out;
70857+ }
70858+#endif
70859+
70860 /*
70861 * Check if this process has the right to modify the specified
70862 * process. The right exists if the process has administrative
fe2de317 70863@@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70864 rcu_read_lock();
70865 tcred = __task_cred(task);
70866 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70867- cred->uid != tcred->suid && cred->uid != tcred->uid &&
70868- !capable(CAP_SYS_NICE)) {
70869+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70870 rcu_read_unlock();
70871 err = -EPERM;
70872 goto out;
70873diff --git a/mm/migrate.c b/mm/migrate.c
70874index 14d0a6a..0360908 100644
70875--- a/mm/migrate.c
70876+++ b/mm/migrate.c
70877@@ -866,9 +866,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
70878
70879 if (anon_vma)
70880 put_anon_vma(anon_vma);
70881-out:
70882 unlock_page(hpage);
70883
70884+out:
70885 if (rc != -EAGAIN) {
70886 list_del(&hpage->lru);
70887 put_page(hpage);
70888@@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
70889 unsigned long chunk_start;
70890 int err;
70891
70892+ pax_track_stack();
70893+
70894 task_nodes = cpuset_mems_allowed(task);
70895
70896 err = -ENOMEM;
fe2de317 70897@@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70898 if (!mm)
70899 return -EINVAL;
70900
70901+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70902+ if (mm != current->mm &&
70903+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70904+ err = -EPERM;
70905+ goto out;
70906+ }
70907+#endif
70908+
70909 /*
70910 * Check if this process has the right to modify the specified
70911 * process. The right exists if the process has administrative
fe2de317 70912@@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70913 rcu_read_lock();
70914 tcred = __task_cred(task);
70915 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70916- cred->uid != tcred->suid && cred->uid != tcred->uid &&
70917- !capable(CAP_SYS_NICE)) {
70918+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70919 rcu_read_unlock();
70920 err = -EPERM;
70921 goto out;
70922diff --git a/mm/mlock.c b/mm/mlock.c
70923index 048260c..57f4a4e 100644
70924--- a/mm/mlock.c
70925+++ b/mm/mlock.c
70926@@ -13,6 +13,7 @@
70927 #include <linux/pagemap.h>
70928 #include <linux/mempolicy.h>
70929 #include <linux/syscalls.h>
70930+#include <linux/security.h>
70931 #include <linux/sched.h>
70932 #include <linux/module.h>
70933 #include <linux/rmap.h>
fe2de317 70934@@ -377,6 +378,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70935 return -EINVAL;
70936 if (end == start)
70937 return 0;
70938+ if (end > TASK_SIZE)
70939+ return -EINVAL;
70940+
70941 vma = find_vma_prev(current->mm, start, &prev);
70942 if (!vma || vma->vm_start > start)
70943 return -ENOMEM;
fe2de317 70944@@ -387,6 +391,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
57199397 70945 for (nstart = start ; ; ) {
15a11c5b 70946 vm_flags_t newflags;
70947
70948+#ifdef CONFIG_PAX_SEGMEXEC
70949+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70950+ break;
70951+#endif
70952+
70953 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70954
70955 newflags = vma->vm_flags | VM_LOCKED;
fe2de317 70956@@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70957 lock_limit >>= PAGE_SHIFT;
70958
70959 /* check against resource limits */
70960+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70961 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70962 error = do_mlock(start, len, 1);
70963 up_write(&current->mm->mmap_sem);
fe2de317 70964@@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70965 static int do_mlockall(int flags)
70966 {
70967 struct vm_area_struct * vma, * prev = NULL;
70968- unsigned int def_flags = 0;
70969
70970 if (flags & MCL_FUTURE)
70971- def_flags = VM_LOCKED;
70972- current->mm->def_flags = def_flags;
70973+ current->mm->def_flags |= VM_LOCKED;
70974+ else
70975+ current->mm->def_flags &= ~VM_LOCKED;
70976 if (flags == MCL_FUTURE)
70977 goto out;
58c5fc13 70978
57199397 70979 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70980 vm_flags_t newflags;
70981
70982+#ifdef CONFIG_PAX_SEGMEXEC
70983+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70984+ break;
70985+#endif
15a11c5b 70986+
70987+ BUG_ON(vma->vm_end > TASK_SIZE);
70988 newflags = vma->vm_flags | VM_LOCKED;
70989 if (!(flags & MCL_CURRENT))
70990 newflags &= ~VM_LOCKED;
66a7e928 70991@@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70992 lock_limit >>= PAGE_SHIFT;
70993
70994 ret = -ENOMEM;
57199397 70995+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70996 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70997 capable(CAP_IPC_LOCK))
70998 ret = do_mlockall(flags);
70999diff --git a/mm/mmap.c b/mm/mmap.c
71000index a65efd4..17d61ff 100644
71001--- a/mm/mmap.c
71002+++ b/mm/mmap.c
16454cff 71003@@ -46,6 +46,16 @@
71004 #define arch_rebalance_pgtables(addr, len) (addr)
71005 #endif
71006
71007+static inline void verify_mm_writelocked(struct mm_struct *mm)
71008+{
71009+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71010+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71011+ up_read(&mm->mmap_sem);
71012+ BUG();
71013+ }
71014+#endif
71015+}
71016+
71017 static void unmap_region(struct mm_struct *mm,
71018 struct vm_area_struct *vma, struct vm_area_struct *prev,
71019 unsigned long start, unsigned long end);
fe2de317 71020@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71021 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71022 *
71023 */
71024-pgprot_t protection_map[16] = {
71025+pgprot_t protection_map[16] __read_only = {
71026 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71027 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71028 };
71029
71030-pgprot_t vm_get_page_prot(unsigned long vm_flags)
71031+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71032 {
71033- return __pgprot(pgprot_val(protection_map[vm_flags &
71034+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71035 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71036 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71037+
71038+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
ae4e228f 71039+ if (!(__supported_pte_mask & _PAGE_NX) &&
58c5fc13
MT
71040+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71041+ (vm_flags & (VM_READ | VM_WRITE)))
71042+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71043+#endif
71044+
71045+ return prot;
71046 }
71047 EXPORT_SYMBOL(vm_get_page_prot);
71048
71049 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71050 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71051 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71052+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
57199397 71053 /*
71054 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71055 * other variables. It can be updated by several CPUs frequently.
fe2de317 71056@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71057 struct vm_area_struct *next = vma->vm_next;
71058
71059 might_sleep();
71060+ BUG_ON(vma->vm_mirror);
71061 if (vma->vm_ops && vma->vm_ops->close)
71062 vma->vm_ops->close(vma);
71063 if (vma->vm_file) {
6e9df6a3 71064@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71065 * not page aligned -Ram Gupta
71066 */
df50ba0c 71067 rlim = rlimit(RLIMIT_DATA);
71068+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71069 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71070 (mm->end_data - mm->start_data) > rlim)
71071 goto out;
6e9df6a3 71072@@ -689,6 +711,12 @@ static int
71073 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71074 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71075 {
71076+
71077+#ifdef CONFIG_PAX_SEGMEXEC
71078+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71079+ return 0;
71080+#endif
71081+
71082 if (is_mergeable_vma(vma, file, vm_flags) &&
15a11c5b 71083 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
58c5fc13 71084 if (vma->vm_pgoff == vm_pgoff)
6e9df6a3 71085@@ -708,6 +736,12 @@ static int
71086 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71087 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71088 {
71089+
71090+#ifdef CONFIG_PAX_SEGMEXEC
71091+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71092+ return 0;
71093+#endif
71094+
71095 if (is_mergeable_vma(vma, file, vm_flags) &&
15a11c5b 71096 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
58c5fc13 71097 pgoff_t vm_pglen;
fe2de317 71098@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71099 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71100 struct vm_area_struct *prev, unsigned long addr,
71101 unsigned long end, unsigned long vm_flags,
71102- struct anon_vma *anon_vma, struct file *file,
71103+ struct anon_vma *anon_vma, struct file *file,
71104 pgoff_t pgoff, struct mempolicy *policy)
71105 {
71106 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71107 struct vm_area_struct *area, *next;
df50ba0c 71108 int err;
71109
71110+#ifdef CONFIG_PAX_SEGMEXEC
71111+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71112+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71113+
71114+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71115+#endif
71116+
71117 /*
71118 * We later require that vma->vm_flags == vm_flags,
71119 * so this tests vma->vm_flags & VM_SPECIAL, too.
fe2de317 71120@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71121 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71122 next = next->vm_next;
71123
71124+#ifdef CONFIG_PAX_SEGMEXEC
71125+ if (prev)
71126+ prev_m = pax_find_mirror_vma(prev);
71127+ if (area)
71128+ area_m = pax_find_mirror_vma(area);
71129+ if (next)
71130+ next_m = pax_find_mirror_vma(next);
71131+#endif
71132+
71133 /*
71134 * Can it merge with the predecessor?
71135 */
fe2de317 71136@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
58c5fc13 71137 /* cases 1, 6 */
df50ba0c 71138 err = vma_adjust(prev, prev->vm_start,
71139 next->vm_end, prev->vm_pgoff, NULL);
71140- } else /* cases 2, 5, 7 */
71141+
71142+#ifdef CONFIG_PAX_SEGMEXEC
71143+ if (!err && prev_m)
71144+ err = vma_adjust(prev_m, prev_m->vm_start,
71145+ next_m->vm_end, prev_m->vm_pgoff, NULL);
71146+#endif
71147+
71148+ } else { /* cases 2, 5, 7 */
df50ba0c 71149 err = vma_adjust(prev, prev->vm_start,
71150 end, prev->vm_pgoff, NULL);
71151+
71152+#ifdef CONFIG_PAX_SEGMEXEC
71153+ if (!err && prev_m)
71154+ err = vma_adjust(prev_m, prev_m->vm_start,
71155+ end_m, prev_m->vm_pgoff, NULL);
71156+#endif
71157+
71158+ }
71159 if (err)
71160 return NULL;
16454cff 71161 khugepaged_enter_vma_merge(prev);
fe2de317 71162@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71163 mpol_equal(policy, vma_policy(next)) &&
71164 can_vma_merge_before(next, vm_flags,
71165 anon_vma, file, pgoff+pglen)) {
71166- if (prev && addr < prev->vm_end) /* case 4 */
71167+ if (prev && addr < prev->vm_end) { /* case 4 */
df50ba0c 71168 err = vma_adjust(prev, prev->vm_start,
71169 addr, prev->vm_pgoff, NULL);
71170- else /* cases 3, 8 */
71171+
71172+#ifdef CONFIG_PAX_SEGMEXEC
71173+ if (!err && prev_m)
71174+ err = vma_adjust(prev_m, prev_m->vm_start,
71175+ addr_m, prev_m->vm_pgoff, NULL);
71176+#endif
71177+
71178+ } else { /* cases 3, 8 */
df50ba0c 71179 err = vma_adjust(area, addr, next->vm_end,
71180 next->vm_pgoff - pglen, NULL);
71181+
71182+#ifdef CONFIG_PAX_SEGMEXEC
71183+ if (!err && area_m)
71184+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
71185+ next_m->vm_pgoff - pglen, NULL);
71186+#endif
71187+
71188+ }
71189 if (err)
71190 return NULL;
16454cff 71191 khugepaged_enter_vma_merge(area);
6e9df6a3 71192@@ -921,14 +1001,11 @@ none:
71193 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71194 struct file *file, long pages)
71195 {
71196- const unsigned long stack_flags
71197- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71198-
71199 if (file) {
71200 mm->shared_vm += pages;
71201 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71202 mm->exec_vm += pages;
71203- } else if (flags & stack_flags)
71204+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71205 mm->stack_vm += pages;
71206 if (flags & (VM_RESERVED|VM_IO))
71207 mm->reserved_vm += pages;
fe2de317 71208@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71209 * (the exception is when the underlying filesystem is noexec
71210 * mounted, in which case we dont add PROT_EXEC.)
71211 */
71212- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71213+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71214 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71215 prot |= PROT_EXEC;
71216
fe2de317 71217@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71218 /* Obtain the address to map to. we verify (or select) it and ensure
71219 * that it represents a valid section of the address space.
71220 */
71221- addr = get_unmapped_area(file, addr, len, pgoff, flags);
71222+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71223 if (addr & ~PAGE_MASK)
71224 return addr;
71225
fe2de317 71226@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71227 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71228 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71229
58c5fc13 71230+#ifdef CONFIG_PAX_MPROTECT
57199397 71231+ if (mm->pax_flags & MF_PAX_MPROTECT) {
c52201e0 71232+#ifndef CONFIG_PAX_MPROTECT_COMPAT
71233+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71234+ gr_log_rwxmmap(file);
71235+
71236+#ifdef CONFIG_PAX_EMUPLT
71237+ vm_flags &= ~VM_EXEC;
71238+#else
71239+ return -EPERM;
71240+#endif
71241+
71242+ }
71243+
71244+ if (!(vm_flags & VM_EXEC))
71245+ vm_flags &= ~VM_MAYEXEC;
71246+#else
71247+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71248+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71249+#endif
71250+ else
71251+ vm_flags &= ~VM_MAYWRITE;
71252+ }
71253+#endif
71254+
71255+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71256+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71257+ vm_flags &= ~VM_PAGEEXEC;
71258+#endif
71259+
ae4e228f 71260 if (flags & MAP_LOCKED)
71261 if (!can_do_mlock())
71262 return -EPERM;
fe2de317 71263@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
58c5fc13 71264 locked += mm->locked_vm;
df50ba0c 71265 lock_limit = rlimit(RLIMIT_MEMLOCK);
71266 lock_limit >>= PAGE_SHIFT;
71267+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71268 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71269 return -EAGAIN;
71270 }
fe2de317 71271@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71272 if (error)
71273 return error;
71274
71275+ if (!gr_acl_handle_mmap(file, prot))
71276+ return -EACCES;
71277+
71278 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71279 }
71280 EXPORT_SYMBOL(do_mmap_pgoff);
fe2de317 71281@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
15a11c5b 71282 vm_flags_t vm_flags = vma->vm_flags;
71283
71284 /* If it was private or non-writable, the write bit is already clear */
71285- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71286+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71287 return 0;
71288
71289 /* The backer wishes to know when pages are first written to? */
fe2de317 71290@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71291 unsigned long charged = 0;
71292 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71293
71294+#ifdef CONFIG_PAX_SEGMEXEC
71295+ struct vm_area_struct *vma_m = NULL;
71296+#endif
71297+
71298+ /*
71299+ * mm->mmap_sem is required to protect against another thread
71300+ * changing the mappings in case we sleep.
71301+ */
71302+ verify_mm_writelocked(mm);
71303+
71304 /* Clear old maps */
71305 error = -ENOMEM;
71306-munmap_back:
71307 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71308 if (vma && vma->vm_start < addr + len) {
71309 if (do_munmap(mm, addr, len))
71310 return -ENOMEM;
71311- goto munmap_back;
71312+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71313+ BUG_ON(vma && vma->vm_start < addr + len);
71314 }
71315
71316 /* Check against address space limit. */
6e9df6a3 71317@@ -1258,6 +1379,16 @@ munmap_back:
71318 goto unacct_error;
71319 }
71320
71321+#ifdef CONFIG_PAX_SEGMEXEC
71322+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71323+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71324+ if (!vma_m) {
71325+ error = -ENOMEM;
71326+ goto free_vma;
71327+ }
71328+ }
71329+#endif
71330+
71331 vma->vm_mm = mm;
71332 vma->vm_start = addr;
71333 vma->vm_end = addr + len;
6e9df6a3 71334@@ -1281,6 +1412,19 @@ munmap_back:
71335 error = file->f_op->mmap(file, vma);
71336 if (error)
71337 goto unmap_and_free_vma;
71338+
71339+#ifdef CONFIG_PAX_SEGMEXEC
71340+ if (vma_m && (vm_flags & VM_EXECUTABLE))
71341+ added_exe_file_vma(mm);
71342+#endif
71343+
71344+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71345+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71346+ vma->vm_flags |= VM_PAGEEXEC;
71347+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71348+ }
71349+#endif
71350+
71351 if (vm_flags & VM_EXECUTABLE)
71352 added_exe_file_vma(mm);
ae4e228f 71353
6e9df6a3 71354@@ -1316,6 +1460,11 @@ munmap_back:
71355 vma_link(mm, vma, prev, rb_link, rb_parent);
71356 file = vma->vm_file;
71357
71358+#ifdef CONFIG_PAX_SEGMEXEC
71359+ if (vma_m)
df50ba0c 71360+ BUG_ON(pax_mirror_vma(vma_m, vma));
71361+#endif
71362+
71363 /* Once vma denies write, undo our temporary denial count */
71364 if (correct_wcount)
71365 atomic_inc(&inode->i_writecount);
6e9df6a3 71366@@ -1324,6 +1473,7 @@ out:
71367
71368 mm->total_vm += len >> PAGE_SHIFT;
71369 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71370+ track_exec_limit(mm, addr, addr + len, vm_flags);
71371 if (vm_flags & VM_LOCKED) {
71372 if (!mlock_vma_pages_range(vma, addr, addr + len))
71373 mm->locked_vm += (len >> PAGE_SHIFT);
6e9df6a3 71374@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
71375 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71376 charged = 0;
71377 free_vma:
71378+
71379+#ifdef CONFIG_PAX_SEGMEXEC
71380+ if (vma_m)
71381+ kmem_cache_free(vm_area_cachep, vma_m);
71382+#endif
71383+
71384 kmem_cache_free(vm_area_cachep, vma);
71385 unacct_error:
71386 if (charged)
6e9df6a3 71387@@ -1348,6 +1504,44 @@ unacct_error:
71388 return error;
71389 }
71390
16454cff 71391+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71392+{
71393+ if (!vma) {
71394+#ifdef CONFIG_STACK_GROWSUP
71395+ if (addr > sysctl_heap_stack_gap)
71396+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71397+ else
71398+ vma = find_vma(current->mm, 0);
71399+ if (vma && (vma->vm_flags & VM_GROWSUP))
71400+ return false;
71401+#endif
71402+ return true;
71403+ }
71404+
71405+ if (addr + len > vma->vm_start)
71406+ return false;
71407+
71408+ if (vma->vm_flags & VM_GROWSDOWN)
71409+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71410+#ifdef CONFIG_STACK_GROWSUP
71411+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71412+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71413+#endif
71414+
71415+ return true;
71416+}
71417+
71418+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71419+{
71420+ if (vma->vm_start < len)
71421+ return -ENOMEM;
71422+ if (!(vma->vm_flags & VM_GROWSDOWN))
71423+ return vma->vm_start - len;
71424+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
71425+ return vma->vm_start - len - sysctl_heap_stack_gap;
71426+ return -ENOMEM;
71427+}
71428+
71429 /* Get an address range which is currently unmapped.
71430 * For shmat() with addr=0.
71431 *
fe2de317 71432@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71433 if (flags & MAP_FIXED)
71434 return addr;
71435
71436+#ifdef CONFIG_PAX_RANDMMAP
71437+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71438+#endif
71439+
71440 if (addr) {
71441 addr = PAGE_ALIGN(addr);
71442- vma = find_vma(mm, addr);
71443- if (TASK_SIZE - len >= addr &&
71444- (!vma || addr + len <= vma->vm_start))
71445- return addr;
71446+ if (TASK_SIZE - len >= addr) {
71447+ vma = find_vma(mm, addr);
71448+ if (check_heap_stack_gap(vma, addr, len))
71449+ return addr;
71450+ }
71451 }
71452 if (len > mm->cached_hole_size) {
71453- start_addr = addr = mm->free_area_cache;
71454+ start_addr = addr = mm->free_area_cache;
71455 } else {
71456- start_addr = addr = TASK_UNMAPPED_BASE;
71457- mm->cached_hole_size = 0;
71458+ start_addr = addr = mm->mmap_base;
71459+ mm->cached_hole_size = 0;
71460 }
71461
71462 full_search:
6e9df6a3 71463@@ -1396,34 +1595,40 @@ full_search:
71464 * Start a new search - just in case we missed
71465 * some holes.
71466 */
71467- if (start_addr != TASK_UNMAPPED_BASE) {
71468- addr = TASK_UNMAPPED_BASE;
71469- start_addr = addr;
71470+ if (start_addr != mm->mmap_base) {
71471+ start_addr = addr = mm->mmap_base;
71472 mm->cached_hole_size = 0;
71473 goto full_search;
71474 }
71475 return -ENOMEM;
71476 }
71477- if (!vma || addr + len <= vma->vm_start) {
71478- /*
71479- * Remember the place where we stopped the search:
71480- */
71481- mm->free_area_cache = addr + len;
71482- return addr;
71483- }
71484+ if (check_heap_stack_gap(vma, addr, len))
71485+ break;
71486 if (addr + mm->cached_hole_size < vma->vm_start)
71487 mm->cached_hole_size = vma->vm_start - addr;
71488 addr = vma->vm_end;
71489 }
71490+
71491+ /*
71492+ * Remember the place where we stopped the search:
71493+ */
71494+ mm->free_area_cache = addr + len;
71495+ return addr;
71496 }
71497 #endif
71498
71499 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71500 {
71501+
71502+#ifdef CONFIG_PAX_SEGMEXEC
71503+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71504+ return;
71505+#endif
71506+
71507 /*
71508 * Is this a new hole at the lowest possible address?
71509 */
71510- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
71511+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
71512 mm->free_area_cache = addr;
71513 mm->cached_hole_size = ~0UL;
71514 }
fe2de317 71515@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71516 {
71517 struct vm_area_struct *vma;
71518 struct mm_struct *mm = current->mm;
71519- unsigned long addr = addr0;
71520+ unsigned long base = mm->mmap_base, addr = addr0;
71521
71522 /* requested length too big for entire address space */
71523 if (len > TASK_SIZE)
fe2de317 71524@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71525 if (flags & MAP_FIXED)
71526 return addr;
71527
71528+#ifdef CONFIG_PAX_RANDMMAP
71529+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71530+#endif
71531+
71532 /* requesting a specific address */
71533 if (addr) {
71534 addr = PAGE_ALIGN(addr);
71535- vma = find_vma(mm, addr);
71536- if (TASK_SIZE - len >= addr &&
71537- (!vma || addr + len <= vma->vm_start))
71538- return addr;
71539+ if (TASK_SIZE - len >= addr) {
71540+ vma = find_vma(mm, addr);
71541+ if (check_heap_stack_gap(vma, addr, len))
71542+ return addr;
71543+ }
71544 }
71545
71546 /* check if free_area_cache is useful for us */
fe2de317 71547@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71548 /* make sure it can fit in the remaining address space */
71549 if (addr > len) {
71550 vma = find_vma(mm, addr-len);
71551- if (!vma || addr <= vma->vm_start)
71552+ if (check_heap_stack_gap(vma, addr - len, len))
71553 /* remember the address as a hint for next time */
71554 return (mm->free_area_cache = addr-len);
71555 }
fe2de317 71556@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71557 * return with success:
71558 */
71559 vma = find_vma(mm, addr);
71560- if (!vma || addr+len <= vma->vm_start)
71561+ if (check_heap_stack_gap(vma, addr, len))
71562 /* remember the address as a hint for next time */
71563 return (mm->free_area_cache = addr);
71564
fe2de317 71565@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71566 mm->cached_hole_size = vma->vm_start - addr;
71567
71568 /* try just below the current vma->vm_start */
71569- addr = vma->vm_start-len;
71570- } while (len < vma->vm_start);
71571+ addr = skip_heap_stack_gap(vma, len);
71572+ } while (!IS_ERR_VALUE(addr));
71573
71574 bottomup:
71575 /*
6e9df6a3 71576@@ -1507,13 +1717,21 @@ bottomup:
71577 * can happen with large stack limits and large mmap()
71578 * allocations.
71579 */
71580+ mm->mmap_base = TASK_UNMAPPED_BASE;
71581+
71582+#ifdef CONFIG_PAX_RANDMMAP
71583+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71584+ mm->mmap_base += mm->delta_mmap;
71585+#endif
71586+
71587+ mm->free_area_cache = mm->mmap_base;
71588 mm->cached_hole_size = ~0UL;
71589- mm->free_area_cache = TASK_UNMAPPED_BASE;
71590 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71591 /*
71592 * Restore the topdown base:
71593 */
71594- mm->free_area_cache = mm->mmap_base;
71595+ mm->mmap_base = base;
71596+ mm->free_area_cache = base;
71597 mm->cached_hole_size = ~0UL;
71598
71599 return addr;
6e9df6a3 71600@@ -1522,6 +1740,12 @@ bottomup:
71601
71602 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71603 {
71604+
71605+#ifdef CONFIG_PAX_SEGMEXEC
71606+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71607+ return;
71608+#endif
71609+
71610 /*
71611 * Is this a new hole at the highest possible address?
71612 */
fe2de317 71613@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71614 mm->free_area_cache = addr;
71615
71616 /* dont allow allocations above current base */
71617- if (mm->free_area_cache > mm->mmap_base)
71618+ if (mm->free_area_cache > mm->mmap_base) {
71619 mm->free_area_cache = mm->mmap_base;
71620+ mm->cached_hole_size = ~0UL;
71621+ }
71622 }
71623
71624 unsigned long
6e9df6a3 71625@@ -1638,6 +1864,28 @@ out:
71626 return prev ? prev->vm_next : vma;
71627 }
71628
71629+#ifdef CONFIG_PAX_SEGMEXEC
71630+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71631+{
71632+ struct vm_area_struct *vma_m;
71633+
71634+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71635+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71636+ BUG_ON(vma->vm_mirror);
71637+ return NULL;
71638+ }
71639+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71640+ vma_m = vma->vm_mirror;
71641+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71642+ BUG_ON(vma->vm_file != vma_m->vm_file);
71643+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
57199397 71644+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71645+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71646+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71647+ return vma_m;
71648+}
71649+#endif
71650+
71651 /*
71652 * Verify that the stack growth is acceptable and
71653 * update accounting. This is shared with both the
fe2de317 71654@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71655 return -ENOMEM;
71656
71657 /* Stack limit test */
71658+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
df50ba0c 71659 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71660 return -ENOMEM;
71661
fe2de317 71662@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
58c5fc13 71663 locked = mm->locked_vm + grow;
71664 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71665 limit >>= PAGE_SHIFT;
71666+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71667 if (locked > limit && !capable(CAP_IPC_LOCK))
71668 return -ENOMEM;
71669 }
fe2de317 71670@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71671 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71672 * vma is the last one with address > vma->vm_end. Have to extend vma.
71673 */
71674+#ifndef CONFIG_IA64
71675+static
71676+#endif
71677 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71678 {
71679 int error;
71680+ bool locknext;
71681
71682 if (!(vma->vm_flags & VM_GROWSUP))
71683 return -EFAULT;
71684
71685+ /* Also guard against wrapping around to address 0. */
71686+ if (address < PAGE_ALIGN(address+1))
71687+ address = PAGE_ALIGN(address+1);
71688+ else
71689+ return -ENOMEM;
71690+
71691 /*
71692 * We must make sure the anon_vma is allocated
71693 * so that the anon_vma locking is not a noop.
71694 */
71695 if (unlikely(anon_vma_prepare(vma)))
71696 return -ENOMEM;
71697+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
57199397 71698+ if (locknext && anon_vma_prepare(vma->vm_next))
58c5fc13 71699+ return -ENOMEM;
6892158b 71700 vma_lock_anon_vma(vma);
58c5fc13 71701+ if (locknext)
6892158b 71702+ vma_lock_anon_vma(vma->vm_next);
71703
71704 /*
71705 * vma->vm_start/vm_end cannot change under us because the caller
71706 * is required to hold the mmap_sem in read mode. We need the
71707- * anon_vma lock to serialize against concurrent expand_stacks.
71708- * Also guard against wrapping around to address 0.
71709+ * anon_vma locks to serialize against concurrent expand_stacks
71710+ * and expand_upwards.
71711 */
71712- if (address < PAGE_ALIGN(address+4))
71713- address = PAGE_ALIGN(address+4);
71714- else {
6892158b 71715- vma_unlock_anon_vma(vma);
71716- return -ENOMEM;
71717- }
71718 error = 0;
71719
71720 /* Somebody else might have raced and expanded it already */
71721- if (address > vma->vm_end) {
71722+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71723+ error = -ENOMEM;
71724+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71725 unsigned long size, grow;
71726
71727 size = address - vma->vm_start;
fe2de317 71728@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
66a7e928 71729 }
6892158b 71730 }
71731 }
71732+ if (locknext)
71733+ vma_unlock_anon_vma(vma->vm_next);
71734 vma_unlock_anon_vma(vma);
16454cff 71735 khugepaged_enter_vma_merge(vma);
58c5fc13 71736 return error;
fe2de317 71737@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
71738 unsigned long address)
71739 {
71740 int error;
71741+ bool lockprev = false;
57199397 71742+ struct vm_area_struct *prev;
71743
71744 /*
71745 * We must make sure the anon_vma is allocated
fe2de317 71746@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
71747 if (error)
71748 return error;
71749
57199397 71750+ prev = vma->vm_prev;
58c5fc13 71751+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71752+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71753+#endif
57199397 71754+ if (lockprev && anon_vma_prepare(prev))
71755+ return -ENOMEM;
71756+ if (lockprev)
6892158b 71757+ vma_lock_anon_vma(prev);
58c5fc13 71758+
6892158b 71759 vma_lock_anon_vma(vma);
71760
71761 /*
fe2de317 71762@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
71763 */
71764
71765 /* Somebody else might have raced and expanded it already */
71766- if (address < vma->vm_start) {
71767+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71768+ error = -ENOMEM;
71769+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71770 unsigned long size, grow;
71771
71772+#ifdef CONFIG_PAX_SEGMEXEC
71773+ struct vm_area_struct *vma_m;
71774+
71775+ vma_m = pax_find_mirror_vma(vma);
71776+#endif
71777+
71778 size = vma->vm_end - address;
71779 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71780
fe2de317 71781@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
71782 if (!error) {
71783 vma->vm_start = address;
71784 vma->vm_pgoff -= grow;
71785+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71786+
71787+#ifdef CONFIG_PAX_SEGMEXEC
71788+ if (vma_m) {
71789+ vma_m->vm_start -= grow << PAGE_SHIFT;
71790+ vma_m->vm_pgoff -= grow;
71791+ }
71792+#endif
71793+
71794 perf_event_mmap(vma);
71795 }
71796 }
71797 }
6892158b 71798 vma_unlock_anon_vma(vma);
58c5fc13 71799+ if (lockprev)
6892158b 71800+ vma_unlock_anon_vma(prev);
16454cff 71801 khugepaged_enter_vma_merge(vma);
71802 return error;
71803 }
fe2de317 71804@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71805 do {
71806 long nrpages = vma_pages(vma);
71807
71808+#ifdef CONFIG_PAX_SEGMEXEC
71809+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71810+ vma = remove_vma(vma);
71811+ continue;
71812+ }
71813+#endif
71814+
71815 mm->total_vm -= nrpages;
71816 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71817 vma = remove_vma(vma);
fe2de317 71818@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13 71819 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
57199397 71820 vma->vm_prev = NULL;
71821 do {
71822+
71823+#ifdef CONFIG_PAX_SEGMEXEC
71824+ if (vma->vm_mirror) {
71825+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71826+ vma->vm_mirror->vm_mirror = NULL;
71827+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
71828+ vma->vm_mirror = NULL;
71829+ }
71830+#endif
71831+
71832 rb_erase(&vma->vm_rb, &mm->mm_rb);
71833 mm->map_count--;
71834 tail_vma = vma;
fe2de317 71835@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
ae4e228f 71836 struct vm_area_struct *new;
df50ba0c 71837 int err = -ENOMEM;
ae4e228f 71838
58c5fc13 71839+#ifdef CONFIG_PAX_SEGMEXEC
ae4e228f 71840+ struct vm_area_struct *vma_m, *new_m = NULL;
58c5fc13 71841+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
ae4e228f 71842+#endif
58c5fc13 71843+
71844 if (is_vm_hugetlb_page(vma) && (addr &
71845 ~(huge_page_mask(hstate_vma(vma)))))
71846 return -EINVAL;
71847
71848+#ifdef CONFIG_PAX_SEGMEXEC
58c5fc13 71849+ vma_m = pax_find_mirror_vma(vma);
ae4e228f 71850+#endif
58c5fc13 71851+
71852 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71853 if (!new)
df50ba0c 71854 goto out_err;
71855
71856+#ifdef CONFIG_PAX_SEGMEXEC
71857+ if (vma_m) {
71858+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71859+ if (!new_m) {
71860+ kmem_cache_free(vm_area_cachep, new);
df50ba0c 71861+ goto out_err;
71862+ }
71863+ }
ae4e228f 71864+#endif
58c5fc13 71865+
71866 /* most fields are the same, copy all, and then fixup */
71867 *new = *vma;
71868
fe2de317 71869@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71870 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71871 }
71872
71873+#ifdef CONFIG_PAX_SEGMEXEC
71874+ if (vma_m) {
71875+ *new_m = *vma_m;
df50ba0c 71876+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
71877+ new_m->vm_mirror = new;
71878+ new->vm_mirror = new_m;
71879+
71880+ if (new_below)
71881+ new_m->vm_end = addr_m;
71882+ else {
71883+ new_m->vm_start = addr_m;
71884+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71885+ }
71886+ }
71887+#endif
71888+
71889 pol = mpol_dup(vma_policy(vma));
71890 if (IS_ERR(pol)) {
df50ba0c 71891 err = PTR_ERR(pol);
fe2de317 71892@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
ae4e228f 71893 else
df50ba0c 71894 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71895
71896+#ifdef CONFIG_PAX_SEGMEXEC
71897+ if (!err && vma_m) {
71898+ if (anon_vma_clone(new_m, vma_m))
71899+ goto out_free_mpol;
71900+
71901+ mpol_get(pol);
71902+ vma_set_policy(new_m, pol);
71903+
71904+ if (new_m->vm_file) {
71905+ get_file(new_m->vm_file);
71906+ if (vma_m->vm_flags & VM_EXECUTABLE)
71907+ added_exe_file_vma(mm);
71908+ }
71909+
71910+ if (new_m->vm_ops && new_m->vm_ops->open)
71911+ new_m->vm_ops->open(new_m);
71912+
71913+ if (new_below)
df50ba0c 71914+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71915+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71916+ else
71917+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71918+
71919+ if (err) {
71920+ if (new_m->vm_ops && new_m->vm_ops->close)
71921+ new_m->vm_ops->close(new_m);
71922+ if (new_m->vm_file) {
71923+ if (vma_m->vm_flags & VM_EXECUTABLE)
71924+ removed_exe_file_vma(mm);
71925+ fput(new_m->vm_file);
71926+ }
71927+ mpol_put(pol);
71928+ }
58c5fc13 71929+ }
ae4e228f 71930+#endif
58c5fc13 71931+
71932 /* Success. */
71933 if (!err)
71934 return 0;
fe2de317 71935@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71936 removed_exe_file_vma(mm);
71937 fput(new->vm_file);
71938 }
71939- unlink_anon_vmas(new);
71940 out_free_mpol:
71941 mpol_put(pol);
71942 out_free_vma:
71943+
71944+#ifdef CONFIG_PAX_SEGMEXEC
71945+ if (new_m) {
71946+ unlink_anon_vmas(new_m);
71947+ kmem_cache_free(vm_area_cachep, new_m);
71948+ }
71949+#endif
71950+
71951+ unlink_anon_vmas(new);
71952 kmem_cache_free(vm_area_cachep, new);
71953 out_err:
71954 return err;
fe2de317 71955@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71956 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71957 unsigned long addr, int new_below)
71958 {
71959+
71960+#ifdef CONFIG_PAX_SEGMEXEC
71961+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71962+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71963+ if (mm->map_count >= sysctl_max_map_count-1)
71964+ return -ENOMEM;
71965+ } else
58c5fc13 71966+#endif
71967+
71968 if (mm->map_count >= sysctl_max_map_count)
71969 return -ENOMEM;
58c5fc13 71970
fe2de317 71971@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71972 * work. This now handles partial unmappings.
71973 * Jeremy Fitzhardinge <jeremy@goop.org>
71974 */
71975+#ifdef CONFIG_PAX_SEGMEXEC
71976 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71977 {
71978+ int ret = __do_munmap(mm, start, len);
71979+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71980+ return ret;
71981+
71982+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71983+}
71984+
71985+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71986+#else
15a11c5b 71987+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
58c5fc13 71988+#endif
15a11c5b 71989+{
71990 unsigned long end;
71991 struct vm_area_struct *vma, *prev, *last;
71992
71993+ /*
71994+ * mm->mmap_sem is required to protect against another thread
71995+ * changing the mappings in case we sleep.
71996+ */
71997+ verify_mm_writelocked(mm);
71998+
71999 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72000 return -EINVAL;
72001
fe2de317 72002@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72003 /* Fix up all other VM information */
72004 remove_vma_list(mm, vma);
72005
72006+ track_exec_limit(mm, start, end, 0UL);
72007+
72008 return 0;
72009 }
72010
fe2de317 72011@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72012
72013 profile_munmap(addr);
72014
72015+#ifdef CONFIG_PAX_SEGMEXEC
72016+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72017+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
72018+ return -EINVAL;
72019+#endif
72020+
72021 down_write(&mm->mmap_sem);
72022 ret = do_munmap(mm, addr, len);
72023 up_write(&mm->mmap_sem);
72024 return ret;
72025 }
72026
72027-static inline void verify_mm_writelocked(struct mm_struct *mm)
72028-{
72029-#ifdef CONFIG_DEBUG_VM
72030- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72031- WARN_ON(1);
72032- up_read(&mm->mmap_sem);
72033- }
72034-#endif
72035-}
72036-
72037 /*
72038 * this is really a simplified "do_mmap". it only handles
72039 * anonymous maps. eventually we may be able to do some
fe2de317 72040@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72041 struct rb_node ** rb_link, * rb_parent;
72042 pgoff_t pgoff = addr >> PAGE_SHIFT;
72043 int error;
72044+ unsigned long charged;
72045
72046 len = PAGE_ALIGN(len);
72047 if (!len)
fe2de317 72048@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72049
72050 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72051
72052+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72053+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72054+ flags &= ~VM_EXEC;
72055+
72056+#ifdef CONFIG_PAX_MPROTECT
72057+ if (mm->pax_flags & MF_PAX_MPROTECT)
72058+ flags &= ~VM_MAYEXEC;
72059+#endif
72060+
72061+ }
72062+#endif
72063+
72064 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72065 if (error & ~PAGE_MASK)
72066 return error;
72067
72068+ charged = len >> PAGE_SHIFT;
72069+
72070 /*
72071 * mlock MCL_FUTURE?
72072 */
72073 if (mm->def_flags & VM_LOCKED) {
72074 unsigned long locked, lock_limit;
72075- locked = len >> PAGE_SHIFT;
72076+ locked = charged;
72077 locked += mm->locked_vm;
df50ba0c 72078 lock_limit = rlimit(RLIMIT_MEMLOCK);
58c5fc13 72079 lock_limit >>= PAGE_SHIFT;
fe2de317 72080@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72081 /*
72082 * Clear old maps. this also does some error checking for us
72083 */
72084- munmap_back:
72085 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72086 if (vma && vma->vm_start < addr + len) {
72087 if (do_munmap(mm, addr, len))
72088 return -ENOMEM;
72089- goto munmap_back;
72090+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72091+ BUG_ON(vma && vma->vm_start < addr + len);
72092 }
72093
72094 /* Check against address space limits *after* clearing old maps... */
72095- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72096+ if (!may_expand_vm(mm, charged))
72097 return -ENOMEM;
72098
72099 if (mm->map_count > sysctl_max_map_count)
72100 return -ENOMEM;
72101
72102- if (security_vm_enough_memory(len >> PAGE_SHIFT))
72103+ if (security_vm_enough_memory(charged))
72104 return -ENOMEM;
72105
72106 /* Can we just expand an old private anonymous mapping? */
fe2de317 72107@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72108 */
72109 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72110 if (!vma) {
72111- vm_unacct_memory(len >> PAGE_SHIFT);
72112+ vm_unacct_memory(charged);
72113 return -ENOMEM;
72114 }
72115
fe2de317 72116@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72117 vma_link(mm, vma, prev, rb_link, rb_parent);
72118 out:
6892158b 72119 perf_event_mmap(vma);
72120- mm->total_vm += len >> PAGE_SHIFT;
72121+ mm->total_vm += charged;
72122 if (flags & VM_LOCKED) {
72123 if (!mlock_vma_pages_range(vma, addr, addr + len))
72124- mm->locked_vm += (len >> PAGE_SHIFT);
72125+ mm->locked_vm += charged;
72126 }
72127+ track_exec_limit(mm, addr, addr + len, flags);
72128 return addr;
72129 }
72130
6e9df6a3 72131@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
72132 * Walk the list again, actually closing and freeing it,
72133 * with preemption enabled, without holding any MM locks.
72134 */
72135- while (vma)
72136+ while (vma) {
72137+ vma->vm_mirror = NULL;
72138 vma = remove_vma(vma);
72139+ }
72140
72141 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72142 }
fe2de317 72143@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72144 struct vm_area_struct * __vma, * prev;
72145 struct rb_node ** rb_link, * rb_parent;
72146
72147+#ifdef CONFIG_PAX_SEGMEXEC
72148+ struct vm_area_struct *vma_m = NULL;
72149+#endif
72150+
72151+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72152+ return -EPERM;
72153+
72154 /*
72155 * The vm_pgoff of a purely anonymous vma should be irrelevant
72156 * until its first write fault, when page's anon_vma and index
fe2de317 72157@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72158 if ((vma->vm_flags & VM_ACCOUNT) &&
72159 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72160 return -ENOMEM;
72161+
72162+#ifdef CONFIG_PAX_SEGMEXEC
72163+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72164+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72165+ if (!vma_m)
72166+ return -ENOMEM;
72167+ }
72168+#endif
72169+
72170 vma_link(mm, vma, prev, rb_link, rb_parent);
72171+
72172+#ifdef CONFIG_PAX_SEGMEXEC
72173+ if (vma_m)
df50ba0c 72174+ BUG_ON(pax_mirror_vma(vma_m, vma));
72175+#endif
72176+
72177 return 0;
72178 }
72179
fe2de317 72180@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72181 struct rb_node **rb_link, *rb_parent;
72182 struct mempolicy *pol;
72183
72184+ BUG_ON(vma->vm_mirror);
72185+
72186 /*
72187 * If anonymous vma has not yet been faulted, update new pgoff
72188 * to match new location, to increase its chance of merging.
fe2de317 72189@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
df50ba0c 72190 return NULL;
58c5fc13 72191 }
15a11c5b 72192
58c5fc13 72193+#ifdef CONFIG_PAX_SEGMEXEC
df50ba0c 72194+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72195+{
72196+ struct vm_area_struct *prev_m;
72197+ struct rb_node **rb_link_m, *rb_parent_m;
72198+ struct mempolicy *pol_m;
72199+
72200+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72201+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72202+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72203+ *vma_m = *vma;
72204+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72205+ if (anon_vma_clone(vma_m, vma))
72206+ return -ENOMEM;
72207+ pol_m = vma_policy(vma_m);
72208+ mpol_get(pol_m);
72209+ vma_set_policy(vma_m, pol_m);
72210+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72211+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72212+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72213+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72214+ if (vma_m->vm_file)
72215+ get_file(vma_m->vm_file);
72216+ if (vma_m->vm_ops && vma_m->vm_ops->open)
72217+ vma_m->vm_ops->open(vma_m);
72218+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72219+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72220+ vma_m->vm_mirror = vma;
72221+ vma->vm_mirror = vma_m;
df50ba0c 72222+ return 0;
72223+}
72224+#endif
15a11c5b 72225+
72226 /*
72227 * Return true if the calling process may expand its vm space by the passed
15a11c5b 72228 * number of pages
fe2de317 72229@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72230 unsigned long lim;
72231
df50ba0c 72232 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72233-
72234+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72235 if (cur + npages > lim)
72236 return 0;
72237 return 1;
fe2de317 72238@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
72239 vma->vm_start = addr;
72240 vma->vm_end = addr + len;
72241
72242+#ifdef CONFIG_PAX_MPROTECT
72243+ if (mm->pax_flags & MF_PAX_MPROTECT) {
c52201e0 72244+#ifndef CONFIG_PAX_MPROTECT_COMPAT
72245+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72246+ return -EPERM;
72247+ if (!(vm_flags & VM_EXEC))
72248+ vm_flags &= ~VM_MAYEXEC;
72249+#else
72250+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72251+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72252+#endif
58c5fc13 72253+ else
57199397 72254+ vm_flags &= ~VM_MAYWRITE;
72255+ }
72256+#endif
72257+
72258 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72259 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72260
72261diff --git a/mm/mprotect.c b/mm/mprotect.c
72262index 5a688a2..27e031c 100644
72263--- a/mm/mprotect.c
72264+++ b/mm/mprotect.c
df50ba0c 72265@@ -23,10 +23,16 @@
72266 #include <linux/mmu_notifier.h>
72267 #include <linux/migrate.h>
ae4e228f 72268 #include <linux/perf_event.h>
72269+
72270+#ifdef CONFIG_PAX_MPROTECT
72271+#include <linux/elf.h>
72272+#endif
72273+
72274 #include <asm/uaccess.h>
72275 #include <asm/pgtable.h>
72276 #include <asm/cacheflush.h>
72277 #include <asm/tlbflush.h>
72278+#include <asm/mmu_context.h>
72279
72280 #ifndef pgprot_modify
72281 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
fe2de317 72282@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
72283 flush_tlb_range(vma, start, end);
72284 }
72285
72286+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72287+/* called while holding the mmap semaphore for writing except stack expansion */
72288+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72289+{
72290+ unsigned long oldlimit, newlimit = 0UL;
72291+
ae4e228f 72292+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72293+ return;
72294+
72295+ spin_lock(&mm->page_table_lock);
72296+ oldlimit = mm->context.user_cs_limit;
72297+ if ((prot & VM_EXEC) && oldlimit < end)
72298+ /* USER_CS limit moved up */
72299+ newlimit = end;
72300+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72301+ /* USER_CS limit moved down */
72302+ newlimit = start;
72303+
72304+ if (newlimit) {
72305+ mm->context.user_cs_limit = newlimit;
72306+
72307+#ifdef CONFIG_SMP
72308+ wmb();
72309+ cpus_clear(mm->context.cpu_user_cs_mask);
72310+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72311+#endif
72312+
72313+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72314+ }
72315+ spin_unlock(&mm->page_table_lock);
72316+ if (newlimit == end) {
72317+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
72318+
72319+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
72320+ if (is_vm_hugetlb_page(vma))
72321+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72322+ else
72323+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72324+ }
72325+}
72326+#endif
72327+
72328 int
72329 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72330 unsigned long start, unsigned long end, unsigned long newflags)
fe2de317 72331@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72332 int error;
72333 int dirty_accountable = 0;
72334
72335+#ifdef CONFIG_PAX_SEGMEXEC
72336+ struct vm_area_struct *vma_m = NULL;
72337+ unsigned long start_m, end_m;
72338+
72339+ start_m = start + SEGMEXEC_TASK_SIZE;
72340+ end_m = end + SEGMEXEC_TASK_SIZE;
72341+#endif
72342+
72343 if (newflags == oldflags) {
72344 *pprev = vma;
72345 return 0;
72346 }
72347
72348+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72349+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72350+
72351+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72352+ return -ENOMEM;
72353+
72354+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72355+ return -ENOMEM;
72356+ }
72357+
72358 /*
72359 * If we make a private mapping writable we increase our commit;
72360 * but (without finer accounting) cannot reduce our commit if we
fe2de317 72361@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72362 }
72363 }
72364
72365+#ifdef CONFIG_PAX_SEGMEXEC
72366+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72367+ if (start != vma->vm_start) {
72368+ error = split_vma(mm, vma, start, 1);
72369+ if (error)
72370+ goto fail;
72371+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72372+ *pprev = (*pprev)->vm_next;
72373+ }
72374+
72375+ if (end != vma->vm_end) {
72376+ error = split_vma(mm, vma, end, 0);
72377+ if (error)
72378+ goto fail;
72379+ }
72380+
72381+ if (pax_find_mirror_vma(vma)) {
72382+ error = __do_munmap(mm, start_m, end_m - start_m);
72383+ if (error)
72384+ goto fail;
72385+ } else {
72386+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72387+ if (!vma_m) {
72388+ error = -ENOMEM;
72389+ goto fail;
72390+ }
72391+ vma->vm_flags = newflags;
72392+ error = pax_mirror_vma(vma_m, vma);
72393+ if (error) {
72394+ vma->vm_flags = oldflags;
72395+ goto fail;
72396+ }
72397+ }
72398+ }
72399+#endif
72400+
72401 /*
72402 * First try to merge with previous and/or next vma.
72403 */
16454cff 72404@@ -204,9 +306,21 @@ success:
df50ba0c 72405 * vm_flags and vm_page_prot are protected by the mmap_sem
72406 * held in write mode.
72407 */
72408+
72409+#ifdef CONFIG_PAX_SEGMEXEC
72410+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72411+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72412+#endif
72413+
72414 vma->vm_flags = newflags;
72415+
72416+#ifdef CONFIG_PAX_MPROTECT
72417+ if (mm->binfmt && mm->binfmt->handle_mprotect)
72418+ mm->binfmt->handle_mprotect(vma, newflags);
72419+#endif
72420+
72421 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72422- vm_get_page_prot(newflags));
72423+ vm_get_page_prot(vma->vm_flags));
72424
72425 if (vma_wants_writenotify(vma)) {
72426 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
fe2de317 72427@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72428 end = start + len;
72429 if (end <= start)
72430 return -ENOMEM;
72431+
72432+#ifdef CONFIG_PAX_SEGMEXEC
72433+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72434+ if (end > SEGMEXEC_TASK_SIZE)
72435+ return -EINVAL;
72436+ } else
72437+#endif
72438+
72439+ if (end > TASK_SIZE)
72440+ return -EINVAL;
72441+
72442 if (!arch_validate_prot(prot))
72443 return -EINVAL;
72444
fe2de317 72445@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72446 /*
72447 * Does the application expect PROT_READ to imply PROT_EXEC:
72448 */
72449- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72450+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72451 prot |= PROT_EXEC;
72452
72453 vm_flags = calc_vm_prot_bits(prot);
fe2de317 72454@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72455 if (start > vma->vm_start)
72456 prev = vma;
72457
58c5fc13 72458+#ifdef CONFIG_PAX_MPROTECT
72459+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72460+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
72461+#endif
72462+
72463 for (nstart = start ; ; ) {
72464 unsigned long newflags;
72465
fe2de317 72466@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72467
72468 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72469 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72470+ if (prot & (PROT_WRITE | PROT_EXEC))
72471+ gr_log_rwxmprotect(vma->vm_file);
72472+
72473+ error = -EACCES;
72474+ goto out;
72475+ }
72476+
72477+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72478 error = -EACCES;
72479 goto out;
72480 }
fe2de317 72481@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
bc901d79 72482 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72483 if (error)
72484 goto out;
72485+
72486+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
72487+
72488 nstart = tmp;
72489
72490 if (nstart < prev->vm_end)
72491diff --git a/mm/mremap.c b/mm/mremap.c
72492index 506fa44..ccc0ba9 100644
72493--- a/mm/mremap.c
72494+++ b/mm/mremap.c
72495@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72496 continue;
72497 pte = ptep_clear_flush(vma, old_addr, old_pte);
72498 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72499+
72500+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
ae4e228f 72501+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72502+ pte = pte_exprotect(pte);
72503+#endif
72504+
72505 set_pte_at(mm, new_addr, new_pte, pte);
72506 }
72507
fe2de317 72508@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72509 if (is_vm_hugetlb_page(vma))
72510 goto Einval;
72511
72512+#ifdef CONFIG_PAX_SEGMEXEC
72513+ if (pax_find_mirror_vma(vma))
72514+ goto Einval;
72515+#endif
72516+
72517 /* We can't remap across vm area boundaries */
72518 if (old_len > vma->vm_end - addr)
72519 goto Efault;
fe2de317 72520@@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned long addr,
72521 unsigned long ret = -EINVAL;
72522 unsigned long charged = 0;
72523 unsigned long map_flags;
72524+ unsigned long pax_task_size = TASK_SIZE;
72525
72526 if (new_addr & ~PAGE_MASK)
72527 goto out;
72528
72529- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72530+#ifdef CONFIG_PAX_SEGMEXEC
72531+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72532+ pax_task_size = SEGMEXEC_TASK_SIZE;
72533+#endif
72534+
72535+ pax_task_size -= PAGE_SIZE;
72536+
72537+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72538 goto out;
72539
72540 /* Check if the location we're moving into overlaps the
72541 * old location at all, and fail if it does.
72542 */
72543- if ((new_addr <= addr) && (new_addr+new_len) > addr)
72544- goto out;
72545-
72546- if ((addr <= new_addr) && (addr+old_len) > new_addr)
72547+ if (addr + old_len > new_addr && new_addr + new_len > addr)
72548 goto out;
72549
72550 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
fe2de317 72551@@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long addr,
72552 struct vm_area_struct *vma;
72553 unsigned long ret = -EINVAL;
72554 unsigned long charged = 0;
72555+ unsigned long pax_task_size = TASK_SIZE;
72556
72557 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72558 goto out;
fe2de317 72559@@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long addr,
72560 if (!new_len)
72561 goto out;
72562
72563+#ifdef CONFIG_PAX_SEGMEXEC
ae4e228f 72564+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72565+ pax_task_size = SEGMEXEC_TASK_SIZE;
72566+#endif
72567+
72568+ pax_task_size -= PAGE_SIZE;
72569+
72570+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72571+ old_len > pax_task_size || addr > pax_task_size-old_len)
72572+ goto out;
72573+
58c5fc13 72574 if (flags & MREMAP_FIXED) {
72575 if (flags & MREMAP_MAYMOVE)
72576 ret = mremap_to(addr, old_len, new_addr, new_len);
fe2de317 72577@@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long addr,
72578 addr + new_len);
72579 }
72580 ret = addr;
72581+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72582 goto out;
72583 }
72584 }
fe2de317 72585@@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long addr,
72586 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72587 if (ret)
72588 goto out;
72589+
72590+ map_flags = vma->vm_flags;
72591 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72592+ if (!(ret & ~PAGE_MASK)) {
72593+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72594+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72595+ }
72596 }
72597 out:
72598 if (ret & ~PAGE_MASK)
72599diff --git a/mm/nobootmem.c b/mm/nobootmem.c
72600index 6e93dc7..c98df0c 100644
72601--- a/mm/nobootmem.c
72602+++ b/mm/nobootmem.c
72603@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
72604 unsigned long __init free_all_memory_core_early(int nodeid)
72605 {
72606 int i;
72607- u64 start, end;
72608+ u64 start, end, startrange, endrange;
72609 unsigned long count = 0;
72610- struct range *range = NULL;
72611+ struct range *range = NULL, rangerange = { 0, 0 };
72612 int nr_range;
72613
72614 nr_range = get_free_all_memory_range(&range, nodeid);
72615+ startrange = __pa(range) >> PAGE_SHIFT;
72616+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
72617
72618 for (i = 0; i < nr_range; i++) {
72619 start = range[i].start;
72620 end = range[i].end;
72621+ if (start <= endrange && startrange < end) {
72622+ BUG_ON(rangerange.start | rangerange.end);
72623+ rangerange = range[i];
72624+ continue;
72625+ }
72626 count += end - start;
72627 __free_pages_memory(start, end);
72628 }
72629+ start = rangerange.start;
72630+ end = rangerange.end;
72631+ count += end - start;
72632+ __free_pages_memory(start, end);
72633
72634 return count;
72635 }
72636diff --git a/mm/nommu.c b/mm/nommu.c
72637index 4358032..e79b99f 100644
72638--- a/mm/nommu.c
72639+++ b/mm/nommu.c
72640@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72641 int sysctl_overcommit_ratio = 50; /* default is 50% */
72642 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72643 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72644-int heap_stack_gap = 0;
72645
72646 atomic_long_t mmap_pages_allocated;
72647
fe2de317 72648@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72649 EXPORT_SYMBOL(find_vma);
72650
72651 /*
72652- * find a VMA
72653- * - we don't extend stack VMAs under NOMMU conditions
72654- */
72655-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72656-{
72657- return find_vma(mm, addr);
72658-}
72659-
72660-/*
72661 * expand a stack to a given address
72662 * - not supported under NOMMU conditions
72663 */
fe2de317 72664@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72665
72666 /* most fields are the same, copy all, and then fixup */
72667 *new = *vma;
72668+ INIT_LIST_HEAD(&new->anon_vma_chain);
72669 *region = *vma->vm_region;
72670 new->vm_region = region;
72671
72672diff --git a/mm/oom_kill.c b/mm/oom_kill.c
72673index 626303b..e9a1785 100644
72674--- a/mm/oom_kill.c
72675+++ b/mm/oom_kill.c
72676@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p,
72677 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
72678 const nodemask_t *nodemask, unsigned long totalpages)
72679 {
72680- int points;
72681+ long points;
72682
72683 if (oom_unkillable_task(p, mem, nodemask))
72684 return 0;
72685diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72686index 6e8ecb6..d9e3d7a 100644
72687--- a/mm/page_alloc.c
72688+++ b/mm/page_alloc.c
72689@@ -340,7 +340,7 @@ out:
72690 * This usage means that zero-order pages may not be compound.
72691 */
72692
72693-static void free_compound_page(struct page *page)
72694+void free_compound_page(struct page *page)
72695 {
72696 __free_pages_ok(page, compound_order(page));
72697 }
72698@@ -355,8 +355,8 @@ void prep_compound_page(struct page *page, unsigned long order)
72699 __SetPageHead(page);
72700 for (i = 1; i < nr_pages; i++) {
72701 struct page *p = page + i;
72702-
72703 __SetPageTail(p);
72704+ set_page_count(p, 0);
72705 p->first_page = page;
72706 }
72707 }
72708@@ -653,6 +653,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
57199397 72709 int i;
58c5fc13 72710 int bad = 0;
72711
72712+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72713+ unsigned long index = 1UL << order;
72714+#endif
72715+
df50ba0c 72716 trace_mm_page_free_direct(page, order);
72717 kmemcheck_free_shadow(page, order);
72718
fe2de317 72719@@ -668,6 +672,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72720 debug_check_no_obj_freed(page_address(page),
72721 PAGE_SIZE << order);
72722 }
72723+
72724+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72725+ for (; index; --index)
72726+ sanitize_highpage(page + index - 1);
72727+#endif
72728+
72729 arch_free_page(page, order);
72730 kernel_map_pages(page, 1 << order, 0);
72731
fe2de317 72732@@ -783,8 +793,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72733 arch_alloc_page(page, order);
72734 kernel_map_pages(page, 1 << order, 1);
72735
72736+#ifndef CONFIG_PAX_MEMORY_SANITIZE
72737 if (gfp_flags & __GFP_ZERO)
72738 prep_zero_page(page, order, gfp_flags);
72739+#endif
72740
72741 if (order && (gfp_flags & __GFP_COMP))
72742 prep_compound_page(page, order);
fe2de317 72743@@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter)
72744 int cpu;
72745 struct zone *zone;
72746
72747+ pax_track_stack();
72748+
72749 for_each_populated_zone(zone) {
15a11c5b 72750 if (skip_free_areas_node(filter, zone_to_nid(zone)))
66a7e928 72751 continue;
fe2de317 72752@@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72753 unsigned long pfn;
72754
72755 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72756+#ifdef CONFIG_X86_32
72757+ /* boot failures in VMware 8 on 32bit vanilla since
72758+ this change */
72759+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72760+#else
72761 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72762+#endif
72763 return 1;
72764 }
72765 return 0;
72766@@ -3373,6 +3393,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
72767 /* Get the start pfn, end pfn and the number of blocks to reserve */
72768 start_pfn = zone->zone_start_pfn;
72769 end_pfn = start_pfn + zone->spanned_pages;
72770+ start_pfn = roundup(start_pfn, pageblock_nr_pages);
72771 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
72772 pageblock_order;
72773
72774diff --git a/mm/percpu.c b/mm/percpu.c
72775index bf80e55..c7c3f9a 100644
72776--- a/mm/percpu.c
72777+++ b/mm/percpu.c
72778@@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu __read_mostly;
ae4e228f 72779 static unsigned int pcpu_last_unit_cpu __read_mostly;
72780
72781 /* the address of the first chunk which starts with the kernel static area */
72782-void *pcpu_base_addr __read_mostly;
72783+void *pcpu_base_addr __read_only;
72784 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72785
ae4e228f 72786 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72787diff --git a/mm/rmap.c b/mm/rmap.c
72788index 8005080..198c2cd 100644
72789--- a/mm/rmap.c
72790+++ b/mm/rmap.c
72791@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72792 struct anon_vma *anon_vma = vma->anon_vma;
72793 struct anon_vma_chain *avc;
72794
72795+#ifdef CONFIG_PAX_SEGMEXEC
72796+ struct anon_vma_chain *avc_m = NULL;
72797+#endif
72798+
72799 might_sleep();
72800 if (unlikely(!anon_vma)) {
58c5fc13 72801 struct mm_struct *mm = vma->vm_mm;
fe2de317 72802@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72803 if (!avc)
72804 goto out_enomem;
72805
72806+#ifdef CONFIG_PAX_SEGMEXEC
15a11c5b 72807+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72808+ if (!avc_m)
72809+ goto out_enomem_free_avc;
72810+#endif
72811+
72812 anon_vma = find_mergeable_anon_vma(vma);
72813 allocated = NULL;
72814 if (!anon_vma) {
fe2de317 72815@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72816 /* page_table_lock to protect against threads */
72817 spin_lock(&mm->page_table_lock);
72818 if (likely(!vma->anon_vma)) {
72819+
72820+#ifdef CONFIG_PAX_SEGMEXEC
72821+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72822+
58c5fc13 72823+ if (vma_m) {
df50ba0c 72824+ BUG_ON(vma_m->anon_vma);
58c5fc13 72825+ vma_m->anon_vma = anon_vma;
72826+ avc_m->anon_vma = anon_vma;
72827+ avc_m->vma = vma;
72828+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
72829+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
72830+ avc_m = NULL;
72831+ }
72832+#endif
72833+
72834 vma->anon_vma = anon_vma;
72835 avc->anon_vma = anon_vma;
72836 avc->vma = vma;
fe2de317 72837@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72838
72839 if (unlikely(allocated))
66a7e928 72840 put_anon_vma(allocated);
72841+
72842+#ifdef CONFIG_PAX_SEGMEXEC
72843+ if (unlikely(avc_m))
72844+ anon_vma_chain_free(avc_m);
72845+#endif
72846+
72847 if (unlikely(avc))
72848 anon_vma_chain_free(avc);
72849 }
72850 return 0;
72851
72852 out_enomem_free_avc:
72853+
72854+#ifdef CONFIG_PAX_SEGMEXEC
72855+ if (avc_m)
72856+ anon_vma_chain_free(avc_m);
72857+#endif
72858+
72859 anon_vma_chain_free(avc);
72860 out_enomem:
72861 return -ENOMEM;
fe2de317 72862@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
72863 * Attach the anon_vmas from src to dst.
72864 * Returns 0 on success, -ENOMEM on failure.
72865 */
72866-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72867+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72868 {
72869 struct anon_vma_chain *avc, *pavc;
15a11c5b 72870 struct anon_vma *root = NULL;
fe2de317 72871@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72872 * the corresponding VMA in the parent process is attached to.
72873 * Returns 0 on success, non-zero on failure.
72874 */
72875-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72876+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72877 {
72878 struct anon_vma_chain *avc;
72879 struct anon_vma *anon_vma;
72880diff --git a/mm/shmem.c b/mm/shmem.c
72881index 32f6763..431c405 100644
72882--- a/mm/shmem.c
72883+++ b/mm/shmem.c
6892158b 72884@@ -31,7 +31,7 @@
6e9df6a3 72885 #include <linux/module.h>
58c5fc13 72886 #include <linux/swap.h>
72887
72888-static struct vfsmount *shm_mnt;
72889+struct vfsmount *shm_mnt;
72890
72891 #ifdef CONFIG_SHMEM
72892 /*
72893@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72894 #define BOGO_DIRENT_SIZE 20
72895
72896 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72897-#define SHORT_SYMLINK_LEN 128
72898+#define SHORT_SYMLINK_LEN 64
72899
72900 struct shmem_xattr {
72901 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
fe2de317 72902@@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
6e9df6a3 72903 struct mempolicy mpol, *spol;
66a7e928 72904 struct vm_area_struct pvma;
72905
72906+ pax_track_stack();
72907+
72908 spol = mpol_cond_copy(&mpol,
6e9df6a3 72909 mpol_shared_policy_lookup(&info->policy, index));
66a7e928 72910
fe2de317 72911@@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72912 int err = -ENOMEM;
72913
72914 /* Round up to L1_CACHE_BYTES to resist false sharing */
72915- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72916- L1_CACHE_BYTES), GFP_KERNEL);
72917+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72918 if (!sbinfo)
72919 return -ENOMEM;
72920
72921diff --git a/mm/slab.c b/mm/slab.c
72922index 893c76d..a742de2 100644
72923--- a/mm/slab.c
72924+++ b/mm/slab.c
15a11c5b 72925@@ -151,7 +151,7 @@
72926
72927 /* Legal flag mask for kmem_cache_create(). */
72928 #if DEBUG
72929-# define CREATE_MASK (SLAB_RED_ZONE | \
72930+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72931 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72932 SLAB_CACHE_DMA | \
72933 SLAB_STORE_USER | \
15a11c5b 72934@@ -159,7 +159,7 @@
71d190be
MT
72935 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72936 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72937 #else
72938-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72939+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72940 SLAB_CACHE_DMA | \
72941 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72942 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
15a11c5b 72943@@ -288,7 +288,7 @@ struct kmem_list3 {
58c5fc13
MT
72944 * Need this for bootstrapping a per node allocator.
72945 */
72946 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
16454cff
MT
72947-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72948+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
58c5fc13
MT
72949 #define CACHE_CACHE 0
72950 #define SIZE_AC MAX_NUMNODES
72951 #define SIZE_L3 (2 * MAX_NUMNODES)
fe2de317 72952@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
8308f9c9
MT
72953 if ((x)->max_freeable < i) \
72954 (x)->max_freeable = i; \
72955 } while (0)
72956-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72957-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72958-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72959-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72960+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72961+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72962+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72963+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72964 #else
72965 #define STATS_INC_ACTIVE(x) do { } while (0)
72966 #define STATS_DEC_ACTIVE(x) do { } while (0)
fe2de317 72967@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
58c5fc13
MT
72968 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72969 */
72970 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72971- const struct slab *slab, void *obj)
72972+ const struct slab *slab, const void *obj)
72973 {
72974 u32 offset = (obj - slab->s_mem);
72975 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
15a11c5b 72976@@ -564,7 +564,7 @@ struct cache_names {
58c5fc13
MT
72977 static struct cache_names __initdata cache_names[] = {
72978 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72979 #include <linux/kmalloc_sizes.h>
72980- {NULL,}
71d190be 72981+ {NULL}
58c5fc13
MT
72982 #undef CACHE
72983 };
72984
fe2de317 72985@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
71d190be
MT
72986 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72987 sizes[INDEX_AC].cs_size,
72988 ARCH_KMALLOC_MINALIGN,
72989- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72990+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72991 NULL);
72992
72993 if (INDEX_AC != INDEX_L3) {
fe2de317 72994@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
71d190be
MT
72995 kmem_cache_create(names[INDEX_L3].name,
72996 sizes[INDEX_L3].cs_size,
72997 ARCH_KMALLOC_MINALIGN,
72998- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72999+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73000 NULL);
73001 }
73002
fe2de317 73003@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
71d190be
MT
73004 sizes->cs_cachep = kmem_cache_create(names->name,
73005 sizes->cs_size,
73006 ARCH_KMALLOC_MINALIGN,
73007- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73008+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73009 NULL);
73010 }
73011 #ifdef CONFIG_ZONE_DMA
fe2de317 73012@@ -4327,10 +4327,10 @@ static int s_show(struct seq_file *m, void *p)
8308f9c9
MT
73013 }
73014 /* cpu stats */
73015 {
73016- unsigned long allochit = atomic_read(&cachep->allochit);
73017- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73018- unsigned long freehit = atomic_read(&cachep->freehit);
73019- unsigned long freemiss = atomic_read(&cachep->freemiss);
73020+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73021+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73022+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73023+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73024
73025 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73026 allochit, allocmiss, freehit, freemiss);
fe2de317 73027@@ -4587,15 +4587,70 @@ static const struct file_operations proc_slabstats_operations = {
58c5fc13 73028
df50ba0c 73029 static int __init slab_proc_init(void)
58c5fc13 73030 {
df50ba0c
MT
73031- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
73032+ mode_t gr_mode = S_IRUGO;
73033+
73034+#ifdef CONFIG_GRKERNSEC_PROC_ADD
73035+ gr_mode = S_IRUSR;
73036+#endif
73037+
73038+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
73039 #ifdef CONFIG_DEBUG_SLAB_LEAK
73040- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73041+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
73042 #endif
73043 return 0;
73044 }
58c5fc13
MT
73045 module_init(slab_proc_init);
73046 #endif
73047
73048+void check_object_size(const void *ptr, unsigned long n, bool to)
73049+{
73050+
73051+#ifdef CONFIG_PAX_USERCOPY
58c5fc13 73052+ struct page *page;
71d190be
MT
73053+ struct kmem_cache *cachep = NULL;
73054+ struct slab *slabp;
58c5fc13
MT
73055+ unsigned int objnr;
73056+ unsigned long offset;
6e9df6a3 73057+ const char *type;
58c5fc13
MT
73058+
73059+ if (!n)
73060+ return;
73061+
6e9df6a3 73062+ type = "<null>";
58c5fc13
MT
73063+ if (ZERO_OR_NULL_PTR(ptr))
73064+ goto report;
73065+
73066+ if (!virt_addr_valid(ptr))
73067+ return;
73068+
73069+ page = virt_to_head_page(ptr);
73070+
6e9df6a3 73071+ type = "<process stack>";
ae4e228f
MT
73072+ if (!PageSlab(page)) {
73073+ if (object_is_on_stack(ptr, n) == -1)
73074+ goto report;
58c5fc13 73075+ return;
ae4e228f 73076+ }
58c5fc13
MT
73077+
73078+ cachep = page_get_cache(page);
6e9df6a3 73079+ type = cachep->name;
71d190be
MT
73080+ if (!(cachep->flags & SLAB_USERCOPY))
73081+ goto report;
73082+
58c5fc13
MT
73083+ slabp = page_get_slab(page);
73084+ objnr = obj_to_index(cachep, slabp, ptr);
73085+ BUG_ON(objnr >= cachep->num);
73086+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73087+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73088+ return;
73089+
73090+report:
6e9df6a3 73091+ pax_report_usercopy(ptr, n, to, type);
58c5fc13
MT
73092+#endif
73093+
73094+}
73095+EXPORT_SYMBOL(check_object_size);
73096+
73097 /**
73098 * ksize - get the actual amount of memory allocated for a given object
73099 * @objp: Pointer to the object
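The SLAB variant of check_object_size() added above locates the slab object containing the pointer and then verifies that the requested copy stays inside that object; everything else (ZERO_OR_NULL_PTR, stack, SLAB_USERCOPY checks) is gating around that bounds test. The core arithmetic, reduced to a condensed sketch with made-up names (obj_size standing for the usable object size):

/* ptr points somewhere inside a slab object; obj_start is the start of
 * that object (after any debug padding). A copy of n bytes is allowed
 * only if it stays entirely within the object. */
static int copy_fits_object(const char *ptr, unsigned long n,
                            const char *obj_start, unsigned long obj_size)
{
    unsigned long offset = ptr - obj_start;

    /* both conditions matter: offset must lie within the object, and
     * n must not exceed the room remaining after that offset */
    return offset <= obj_size && n <= obj_size - offset;
}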
fe2de317
MT
73100diff --git a/mm/slob.c b/mm/slob.c
73101index bf39181..727f7a3 100644
73102--- a/mm/slob.c
73103+++ b/mm/slob.c
58c5fc13
MT
73104@@ -29,7 +29,7 @@
73105 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73106 * alloc_pages() directly, allocating compound pages so the page order
73107 * does not have to be separately tracked, and also stores the exact
73108- * allocation size in page->private so that it can be used to accurately
73109+ * allocation size in slob_page->size so that it can be used to accurately
73110 * provide ksize(). These objects are detected in kfree() because slob_page()
73111 * is false for them.
73112 *
73113@@ -58,6 +58,7 @@
73114 */
73115
73116 #include <linux/kernel.h>
73117+#include <linux/sched.h>
73118 #include <linux/slab.h>
73119 #include <linux/mm.h>
73120 #include <linux/swap.h> /* struct reclaim_state */
6892158b 73121@@ -102,7 +103,8 @@ struct slob_page {
58c5fc13
MT
73122 unsigned long flags; /* mandatory */
73123 atomic_t _count; /* mandatory */
73124 slobidx_t units; /* free units left in page */
73125- unsigned long pad[2];
73126+ unsigned long pad[1];
73127+ unsigned long size; /* size when >=PAGE_SIZE */
73128 slob_t *free; /* first free slob_t in page */
73129 struct list_head list; /* linked list of free pages */
73130 };
6892158b 73131@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
58c5fc13
MT
73132 */
73133 static inline int is_slob_page(struct slob_page *sp)
73134 {
73135- return PageSlab((struct page *)sp);
73136+ return PageSlab((struct page *)sp) && !sp->size;
73137 }
73138
73139 static inline void set_slob_page(struct slob_page *sp)
fe2de317 73140@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
58c5fc13
MT
73141
73142 static inline struct slob_page *slob_page(const void *addr)
73143 {
73144- return (struct slob_page *)virt_to_page(addr);
73145+ return (struct slob_page *)virt_to_head_page(addr);
73146 }
73147
73148 /*
fe2de317 73149@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
58c5fc13
MT
73150 /*
73151 * Return the size of a slob block.
73152 */
73153-static slobidx_t slob_units(slob_t *s)
73154+static slobidx_t slob_units(const slob_t *s)
73155 {
73156 if (s->units > 0)
73157 return s->units;
6892158b 73158@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
58c5fc13
MT
73159 /*
73160 * Return the next free slob block pointer after this one.
73161 */
73162-static slob_t *slob_next(slob_t *s)
73163+static slob_t *slob_next(const slob_t *s)
73164 {
73165 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73166 slobidx_t next;
6892158b 73167@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
58c5fc13
MT
73168 /*
73169 * Returns true if s is the last free block in its page.
73170 */
73171-static int slob_last(slob_t *s)
73172+static int slob_last(const slob_t *s)
73173 {
73174 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73175 }
fe2de317 73176@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
58c5fc13
MT
73177 if (!page)
73178 return NULL;
73179
73180+ set_slob_page(page);
73181 return page_address(page);
73182 }
73183
fe2de317 73184@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
58c5fc13
MT
73185 if (!b)
73186 return NULL;
73187 sp = slob_page(b);
73188- set_slob_page(sp);
73189
73190 spin_lock_irqsave(&slob_lock, flags);
73191 sp->units = SLOB_UNITS(PAGE_SIZE);
73192 sp->free = b;
73193+ sp->size = 0;
73194 INIT_LIST_HEAD(&sp->list);
73195 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73196 set_slob_page_free(sp, slob_list);
6892158b 73197@@ -476,10 +479,9 @@ out:
57199397
MT
73198 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73199 */
58c5fc13
MT
73200
73201-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73202+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73203 {
73204- unsigned int *m;
73205- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73206+ slob_t *m;
73207 void *ret;
73208
6e9df6a3 73209 gfp &= gfp_allowed_mask;
fe2de317 73210@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
58c5fc13
MT
73211
73212 if (!m)
73213 return NULL;
73214- *m = size;
73215+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73216+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73217+ m[0].units = size;
73218+ m[1].units = align;
73219 ret = (void *)m + align;
73220
73221 trace_kmalloc_node(_RET_IP_, ret,
fe2de317 73222@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
bc901d79
MT
73223 gfp |= __GFP_COMP;
73224 ret = slob_new_pages(gfp, order, node);
58c5fc13
MT
73225 if (ret) {
73226- struct page *page;
73227- page = virt_to_page(ret);
73228- page->private = size;
73229+ struct slob_page *sp;
73230+ sp = slob_page(ret);
73231+ sp->size = size;
73232 }
73233
73234 trace_kmalloc_node(_RET_IP_, ret,
15a11c5b
MT
73235 size, PAGE_SIZE << order, gfp, node);
73236 }
73237
73238- kmemleak_alloc(ret, size, 1, gfp);
73239+ return ret;
73240+}
58c5fc13
MT
73241+
73242+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73243+{
73244+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
15a11c5b 73245+ void *ret = __kmalloc_node_align(size, gfp, node, align);
58c5fc13 73246+
15a11c5b
MT
73247+ if (!ZERO_OR_NULL_PTR(ret))
73248+ kmemleak_alloc(ret, size, 1, gfp);
73249 return ret;
73250 }
58c5fc13 73251 EXPORT_SYMBOL(__kmalloc_node);
6e9df6a3 73252@@ -533,13 +547,92 @@ void kfree(const void *block)
58c5fc13
MT
73253 sp = slob_page(block);
73254 if (is_slob_page(sp)) {
73255 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73256- unsigned int *m = (unsigned int *)(block - align);
73257- slob_free(m, *m + align);
73258- } else
73259+ slob_t *m = (slob_t *)(block - align);
73260+ slob_free(m, m[0].units + align);
73261+ } else {
73262+ clear_slob_page(sp);
73263+ free_slob_page(sp);
73264+ sp->size = 0;
73265 put_page(&sp->page);
73266+ }
73267 }
73268 EXPORT_SYMBOL(kfree);
73269
73270+void check_object_size(const void *ptr, unsigned long n, bool to)
73271+{
73272+
73273+#ifdef CONFIG_PAX_USERCOPY
73274+ struct slob_page *sp;
73275+ const slob_t *free;
73276+ const void *base;
15a11c5b 73277+ unsigned long flags;
6e9df6a3 73278+ const char *type;
58c5fc13
MT
73279+
73280+ if (!n)
73281+ return;
73282+
6e9df6a3 73283+ type = "<null>";
58c5fc13
MT
73284+ if (ZERO_OR_NULL_PTR(ptr))
73285+ goto report;
73286+
73287+ if (!virt_addr_valid(ptr))
73288+ return;
73289+
6e9df6a3 73290+ type = "<process stack>";
58c5fc13 73291+ sp = slob_page(ptr);
ae4e228f
MT
73292+ if (!PageSlab((struct page*)sp)) {
73293+ if (object_is_on_stack(ptr, n) == -1)
73294+ goto report;
58c5fc13 73295+ return;
ae4e228f 73296+ }
58c5fc13 73297+
6e9df6a3 73298+ type = "<slob>";
58c5fc13
MT
73299+ if (sp->size) {
73300+ base = page_address(&sp->page);
73301+ if (base <= ptr && n <= sp->size - (ptr - base))
73302+ return;
73303+ goto report;
73304+ }
73305+
73306+ /* some tricky double walking to find the chunk */
15a11c5b 73307+ spin_lock_irqsave(&slob_lock, flags);
58c5fc13
MT
73308+ base = (void *)((unsigned long)ptr & PAGE_MASK);
73309+ free = sp->free;
73310+
73311+ while (!slob_last(free) && (void *)free <= ptr) {
73312+ base = free + slob_units(free);
73313+ free = slob_next(free);
73314+ }
73315+
73316+ while (base < (void *)free) {
73317+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73318+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
73319+ int offset;
73320+
73321+ if (ptr < base + align)
15a11c5b 73322+ break;
58c5fc13
MT
73323+
73324+ offset = ptr - base - align;
15a11c5b
MT
73325+ if (offset >= m) {
73326+ base += size;
73327+ continue;
58c5fc13 73328+ }
15a11c5b
MT
73329+
73330+ if (n > m - offset)
73331+ break;
73332+
73333+ spin_unlock_irqrestore(&slob_lock, flags);
73334+ return;
58c5fc13
MT
73335+ }
73336+
15a11c5b 73337+ spin_unlock_irqrestore(&slob_lock, flags);
58c5fc13 73338+report:
6e9df6a3 73339+ pax_report_usercopy(ptr, n, to, type);
58c5fc13
MT
73340+#endif
73341+
73342+}
73343+EXPORT_SYMBOL(check_object_size);
73344+
73345 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73346 size_t ksize(const void *block)
73347 {
6e9df6a3 73348@@ -552,10 +645,10 @@ size_t ksize(const void *block)
58c5fc13
MT
73349 sp = slob_page(block);
73350 if (is_slob_page(sp)) {
73351 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73352- unsigned int *m = (unsigned int *)(block - align);
73353- return SLOB_UNITS(*m) * SLOB_UNIT;
73354+ slob_t *m = (slob_t *)(block - align);
73355+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73356 } else
73357- return sp->page.private;
73358+ return sp->size;
73359 }
73360 EXPORT_SYMBOL(ksize);
73361
fe2de317 73362@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
15a11c5b
MT
73363 {
73364 struct kmem_cache *c;
73365
73366+#ifdef CONFIG_PAX_USERCOPY
73367+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
73368+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73369+#else
73370 c = slob_alloc(sizeof(struct kmem_cache),
73371 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73372+#endif
73373
73374 if (c) {
73375 c->name = name;
fe2de317 73376@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
6e9df6a3
MT
73377
73378 lockdep_trace_alloc(flags);
58c5fc13
MT
73379
73380+#ifdef CONFIG_PAX_USERCOPY
73381+ b = __kmalloc_node_align(c->size, flags, node, c->align);
73382+#else
73383 if (c->size < PAGE_SIZE) {
73384 b = slob_alloc(c->size, flags, c->align, node);
73385 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73386 SLOB_UNITS(c->size) * SLOB_UNIT,
73387 flags, node);
73388 } else {
73389+ struct slob_page *sp;
73390+
73391 b = slob_new_pages(flags, get_order(c->size), node);
73392+ sp = slob_page(b);
73393+ sp->size = c->size;
73394 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73395 PAGE_SIZE << get_order(c->size),
73396 flags, node);
73397 }
73398+#endif
73399
73400 if (c->ctor)
73401 c->ctor(b);
6e9df6a3 73402@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
58c5fc13
MT
73403
73404 static void __kmem_cache_free(void *b, int size)
73405 {
73406- if (size < PAGE_SIZE)
73407+ struct slob_page *sp = slob_page(b);
73408+
73409+ if (is_slob_page(sp))
73410 slob_free(b, size);
73411- else
73412+ else {
73413+ clear_slob_page(sp);
73414+ free_slob_page(sp);
73415+ sp->size = 0;
73416 slob_free_pages(b, get_order(size));
73417+ }
73418 }
73419
73420 static void kmem_rcu_free(struct rcu_head *head)
fe2de317 73421@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
58c5fc13
MT
73422
73423 void kmem_cache_free(struct kmem_cache *c, void *b)
73424 {
73425+ int size = c->size;
73426+
73427+#ifdef CONFIG_PAX_USERCOPY
73428+ if (size + c->align < PAGE_SIZE) {
73429+ size += c->align;
73430+ b -= c->align;
73431+ }
73432+#endif
73433+
73434 kmemleak_free_recursive(b, c->flags);
73435 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73436 struct slob_rcu *slob_rcu;
73437- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
58c5fc13 73438- slob_rcu->size = c->size;
6892158b 73439+ slob_rcu = b + (size - sizeof(struct slob_rcu));
58c5fc13
MT
73440+ slob_rcu->size = size;
73441 call_rcu(&slob_rcu->head, kmem_rcu_free);
73442 } else {
73443- __kmem_cache_free(b, c->size);
73444+ __kmem_cache_free(b, size);
73445 }
73446
15a11c5b
MT
73447+#ifdef CONFIG_PAX_USERCOPY
73448+ trace_kfree(_RET_IP_, b);
73449+#else
58c5fc13 73450 trace_kmem_cache_free(_RET_IP_, b);
15a11c5b
MT
73451+#endif
73452+
73453 }
73454 EXPORT_SYMBOL(kmem_cache_free);
73455
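In the SLOB changes above, small kmalloc blocks gain a two-unit header: m[0].units records the requested size and m[1].units the alignment, so kfree(), ksize() and the PAX_USERCOPY walk can recover both without relying on page->private (the BUILD_BUG_ONs guarantee the alignment leaves room for both units). A rough sketch of the encode/decode, assuming a simplified slob_t with just a units field:

typedef struct { long units; } slob_unit;

/* write size and alignment just before the returned payload pointer;
 * requires align >= 2 * sizeof(slob_unit), as the patch asserts */
static void *encode_header(slob_unit *m, long size, long align)
{
    m[0].units = size;          /* requested allocation size   */
    m[1].units = align;         /* alignment the caller used   */
    return (char *)m + align;   /* payload starts after header */
}

/* given the payload pointer and the alignment, recover the size */
static long decode_size(const void *block, long align)
{
    const slob_unit *m = (const slob_unit *)((const char *)block - align);
    return m[0].units;
}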
fe2de317
MT
73456diff --git a/mm/slub.c b/mm/slub.c
73457index 7c54fe8..0bb4ac5 100644
73458--- a/mm/slub.c
73459+++ b/mm/slub.c
6e9df6a3 73460@@ -208,7 +208,7 @@ struct track {
15a11c5b
MT
73461
73462 enum track_item { TRACK_ALLOC, TRACK_FREE };
73463
73464-#ifdef CONFIG_SYSFS
73465+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73466 static int sysfs_slab_add(struct kmem_cache *);
73467 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73468 static void sysfs_slab_remove(struct kmem_cache *);
fe2de317 73469@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
bc901d79
MT
73470 if (!t->addr)
73471 return;
73472
73473- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73474+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73475 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
6e9df6a3
MT
73476 #ifdef CONFIG_STACKTRACE
73477 {
fe2de317 73478@@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
ae4e228f
MT
73479
73480 page = virt_to_head_page(x);
73481
73482+ BUG_ON(!PageSlab(page));
73483+
73484 slab_free(s, page, x, _RET_IP_);
73485
73486 trace_kmem_cache_free(_RET_IP_, x);
6e9df6a3 73487@@ -2489,7 +2491,7 @@ static int slub_min_objects;
58c5fc13
MT
73488 * Merge control. If this is set then no merging of slab caches will occur.
73489 * (Could be removed. This was introduced to pacify the merge skeptics.)
73490 */
73491-static int slub_nomerge;
73492+static int slub_nomerge = 1;
73493
73494 /*
73495 * Calculate the order of allocation given an slab object size.
fe2de317 73496@@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_cache *s,
58c5fc13
MT
73497 * list to avoid pounding the page allocator excessively.
73498 */
73499 set_min_partial(s, ilog2(s->size));
73500- s->refcount = 1;
73501+ atomic_set(&s->refcount, 1);
73502 #ifdef CONFIG_NUMA
73503 s->remote_node_defrag_ratio = 1000;
73504 #endif
fe2de317 73505@@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
58c5fc13
MT
73506 void kmem_cache_destroy(struct kmem_cache *s)
73507 {
73508 down_write(&slub_lock);
73509- s->refcount--;
73510- if (!s->refcount) {
73511+ if (atomic_dec_and_test(&s->refcount)) {
73512 list_del(&s->list);
58c5fc13 73513 if (kmem_cache_close(s)) {
6892158b 73514 printk(KERN_ERR "SLUB %s: %s called for cache that "
fe2de317 73515@@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
58c5fc13
MT
73516 EXPORT_SYMBOL(__kmalloc_node);
73517 #endif
73518
73519+void check_object_size(const void *ptr, unsigned long n, bool to)
73520+{
73521+
73522+#ifdef CONFIG_PAX_USERCOPY
73523+ struct page *page;
71d190be 73524+ struct kmem_cache *s = NULL;
58c5fc13 73525+ unsigned long offset;
6e9df6a3 73526+ const char *type;
58c5fc13
MT
73527+
73528+ if (!n)
73529+ return;
73530+
6e9df6a3 73531+ type = "<null>";
58c5fc13
MT
73532+ if (ZERO_OR_NULL_PTR(ptr))
73533+ goto report;
73534+
73535+ if (!virt_addr_valid(ptr))
73536+ return;
73537+
16454cff 73538+ page = virt_to_head_page(ptr);
58c5fc13 73539+
6e9df6a3 73540+ type = "<process stack>";
16454cff 73541+ if (!PageSlab(page)) {
ae4e228f
MT
73542+ if (object_is_on_stack(ptr, n) == -1)
73543+ goto report;
58c5fc13 73544+ return;
ae4e228f 73545+ }
58c5fc13
MT
73546+
73547+ s = page->slab;
6e9df6a3 73548+ type = s->name;
71d190be
MT
73549+ if (!(s->flags & SLAB_USERCOPY))
73550+ goto report;
73551+
58c5fc13
MT
73552+ offset = (ptr - page_address(page)) % s->size;
73553+ if (offset <= s->objsize && n <= s->objsize - offset)
73554+ return;
73555+
73556+report:
6e9df6a3 73557+ pax_report_usercopy(ptr, n, to, type);
58c5fc13
MT
73558+#endif
73559+
73560+}
73561+EXPORT_SYMBOL(check_object_size);
73562+
73563 size_t ksize(const void *object)
73564 {
73565 struct page *page;
fe2de317 73566@@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
bc901d79
MT
73567 int node;
73568
73569 list_add(&s->list, &slab_caches);
73570- s->refcount = -1;
73571+ atomic_set(&s->refcount, -1);
73572
73573 for_each_node_state(node, N_NORMAL_MEMORY) {
73574 struct kmem_cache_node *n = get_node(s, node);
6e9df6a3 73575@@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void)
71d190be
MT
73576
73577 /* Caches that are not of the two-to-the-power-of size */
73578 if (KMALLOC_MIN_SIZE <= 32) {
73579- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73580+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73581 caches++;
73582 }
73583
73584 if (KMALLOC_MIN_SIZE <= 64) {
73585- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73586+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73587 caches++;
73588 }
73589
73590 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73591- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73592+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73593 caches++;
73594 }
73595
fe2de317 73596@@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_cache *s)
58c5fc13
MT
73597 /*
73598 * We may have set a slab to be unmergeable during bootstrap.
73599 */
73600- if (s->refcount < 0)
73601+ if (atomic_read(&s->refcount) < 0)
73602 return 1;
73603
73604 return 0;
fe2de317 73605@@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
df50ba0c
MT
73606 down_write(&slub_lock);
73607 s = find_mergeable(size, align, flags, name, ctor);
58c5fc13 73608 if (s) {
58c5fc13
MT
73609- s->refcount++;
73610+ atomic_inc(&s->refcount);
73611 /*
73612 * Adjust the object sizes so that we clear
73613 * the complete object on kzalloc.
fe2de317 73614@@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
6892158b 73615 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
58c5fc13
MT
73616
73617 if (sysfs_slab_alias(s, name)) {
58c5fc13
MT
73618- s->refcount--;
73619+ atomic_dec(&s->refcount);
58c5fc13
MT
73620 goto err;
73621 }
6892158b 73622 up_write(&slub_lock);
fe2de317 73623@@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
15a11c5b
MT
73624 }
73625 #endif
73626
73627-#ifdef CONFIG_SYSFS
73628+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73629 static int count_inuse(struct page *page)
73630 {
73631 return page->inuse;
6e9df6a3 73632@@ -4280,12 +4325,12 @@ static void resiliency_test(void)
15a11c5b
MT
73633 validate_slab_cache(kmalloc_caches[9]);
73634 }
73635 #else
73636-#ifdef CONFIG_SYSFS
73637+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73638 static void resiliency_test(void) {};
73639 #endif
73640 #endif
73641
73642-#ifdef CONFIG_SYSFS
73643+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73644 enum slab_stat_type {
73645 SL_ALL, /* All slabs */
73646 SL_PARTIAL, /* Only partially allocated slabs */
6e9df6a3 73647@@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
58c5fc13
MT
73648
73649 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73650 {
73651- return sprintf(buf, "%d\n", s->refcount - 1);
73652+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73653 }
73654 SLAB_ATTR_RO(aliases);
73655
fe2de317 73656@@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kmem_cache *s)
15a11c5b
MT
73657 return name;
73658 }
73659
73660+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73661 static int sysfs_slab_add(struct kmem_cache *s)
73662 {
73663 int err;
fe2de317 73664@@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
15a11c5b
MT
73665 kobject_del(&s->kobj);
73666 kobject_put(&s->kobj);
73667 }
73668+#endif
73669
73670 /*
73671 * Need to buffer aliases during bootup until sysfs becomes
6e9df6a3 73672@@ -5100,6 +5147,7 @@ struct saved_alias {
15a11c5b
MT
73673
73674 static struct saved_alias *alias_list;
73675
73676+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73677 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73678 {
73679 struct saved_alias *al;
fe2de317 73680@@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
15a11c5b
MT
73681 alias_list = al;
73682 return 0;
73683 }
73684+#endif
73685
73686 static int __init slab_sysfs_init(void)
73687 {
fe2de317 73688@@ -5257,7 +5306,13 @@ static const struct file_operations proc_slabinfo_operations = {
58c5fc13 73689
df50ba0c
MT
73690 static int __init slab_proc_init(void)
73691 {
73692- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
73693+ mode_t gr_mode = S_IRUGO;
73694+
73695+#ifdef CONFIG_GRKERNSEC_PROC_ADD
73696+ gr_mode = S_IRUSR;
73697+#endif
73698+
73699+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
58c5fc13
MT
73700 return 0;
73701 }
df50ba0c 73702 module_init(slab_proc_init);
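For SLUB, objects in a slab page are laid out back-to-back at a fixed stride of s->size, so the check_object_size() added above only needs a modulo to find the offset inside the containing object before applying the same bounds test as the SLAB version. A compact sketch of that calculation (hypothetical names; objsize is the usable part of each slot):

/* SLUB lays objects out with stride 'stride' starting at page_start;
 * only the first 'objsize' bytes of each slot are usable payload. */
static int slub_copy_ok(const char *ptr, unsigned long n,
                        const char *page_start,
                        unsigned long stride, unsigned long objsize)
{
    unsigned long offset = (unsigned long)(ptr - page_start) % stride;

    return offset <= objsize && n <= objsize - offset;
}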
fe2de317
MT
73703diff --git a/mm/swap.c b/mm/swap.c
73704index 87627f1..8a9eb34 100644
73705--- a/mm/swap.c
73706+++ b/mm/swap.c
15a11c5b
MT
73707@@ -31,6 +31,7 @@
73708 #include <linux/backing-dev.h>
73709 #include <linux/memcontrol.h>
73710 #include <linux/gfp.h>
73711+#include <linux/hugetlb.h>
73712
73713 #include "internal.h"
73714
fe2de317 73715@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
15a11c5b
MT
73716
73717 __page_cache_release(page);
73718 dtor = get_compound_page_dtor(page);
73719+ if (!PageHuge(page))
73720+ BUG_ON(dtor != free_compound_page);
73721 (*dtor)(page);
73722 }
73723
fe2de317
MT
73724diff --git a/mm/swapfile.c b/mm/swapfile.c
73725index 17bc224..1677059 100644
73726--- a/mm/swapfile.c
73727+++ b/mm/swapfile.c
15a11c5b 73728@@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
8308f9c9
MT
73729
73730 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73731 /* Activity counter to indicate that a swapon or swapoff has occurred */
73732-static atomic_t proc_poll_event = ATOMIC_INIT(0);
73733+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73734
73735 static inline unsigned char swap_count(unsigned char ent)
73736 {
fe2de317 73737@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
8308f9c9
MT
73738 }
73739 filp_close(swap_file, NULL);
73740 err = 0;
73741- atomic_inc(&proc_poll_event);
73742+ atomic_inc_unchecked(&proc_poll_event);
73743 wake_up_interruptible(&proc_poll_wait);
73744
73745 out_dput:
fe2de317 73746@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
8308f9c9
MT
73747
73748 poll_wait(file, &proc_poll_wait, wait);
73749
6e9df6a3
MT
73750- if (seq->poll_event != atomic_read(&proc_poll_event)) {
73751- seq->poll_event = atomic_read(&proc_poll_event);
73752+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73753+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
8308f9c9
MT
73754 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73755 }
73756
fe2de317 73757@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
6e9df6a3 73758 return ret;
8308f9c9 73759
6e9df6a3
MT
73760 seq = file->private_data;
73761- seq->poll_event = atomic_read(&proc_poll_event);
73762+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73763 return 0;
8308f9c9
MT
73764 }
73765
fe2de317 73766@@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
66a7e928
MT
73767 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73768
8308f9c9
MT
73769 mutex_unlock(&swapon_mutex);
73770- atomic_inc(&proc_poll_event);
73771+ atomic_inc_unchecked(&proc_poll_event);
73772 wake_up_interruptible(&proc_poll_wait);
73773
66a7e928 73774 if (S_ISREG(inode->i_mode))
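The swapfile hunks (and many of the networking ones that follow) convert event counters from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t operations are instrumented to detect overflow of reference counts; counters that are allowed to wrap, such as proc_poll_event here, are moved to the _unchecked variants so they opt out of that detection. Conceptually the unchecked type is just the same storage with uninstrumented operations, roughly like this sketch (not the real PaX definition):

/* simplified illustration: identical storage, but increments done
 * through the _unchecked helper are excluded from overflow detection */
typedef struct { int counter; } atomic_unchecked_sketch_t;

static inline void inc_unchecked(atomic_unchecked_sketch_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* wraps silently */
}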
fe2de317
MT
73775diff --git a/mm/util.c b/mm/util.c
73776index 88ea1bd..0f1dfdb 100644
73777--- a/mm/util.c
73778+++ b/mm/util.c
15a11c5b 73779@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
66a7e928
MT
73780 * allocated buffer. Use this if you don't want to free the buffer immediately
73781 * like, for example, with RCU.
73782 */
73783+#undef __krealloc
73784 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
73785 {
73786 void *ret;
15a11c5b 73787@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
66a7e928
MT
73788 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
73789 * %NULL pointer, the object pointed to is freed.
73790 */
73791+#undef krealloc
73792 void *krealloc(const void *p, size_t new_size, gfp_t flags)
73793 {
73794 void *ret;
fe2de317 73795@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
73796 void arch_pick_mmap_layout(struct mm_struct *mm)
73797 {
73798 mm->mmap_base = TASK_UNMAPPED_BASE;
73799+
73800+#ifdef CONFIG_PAX_RANDMMAP
73801+ if (mm->pax_flags & MF_PAX_RANDMMAP)
73802+ mm->mmap_base += mm->delta_mmap;
73803+#endif
73804+
73805 mm->get_unmapped_area = arch_get_unmapped_area;
73806 mm->unmap_area = arch_unmap_area;
73807 }
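The mm/util.c hunk above makes arch_pick_mmap_layout() add a per-process random delta to the mmap base when PAX_RANDMMAP is active for the task, shifting every new mapping by the same amount chosen at exec time. A small sketch of the idea, with delta_mmap standing in for the randomized value picked elsewhere and the base constant purely illustrative:

#define TASK_UNMAPPED_BASE_SKETCH 0x40000000UL

static unsigned long pick_mmap_base(unsigned long delta_mmap, int randomize)
{
    unsigned long base = TASK_UNMAPPED_BASE_SKETCH;

    if (randomize)
        base += delta_mmap;   /* one fixed offset for the whole process */
    return base;
}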
fe2de317
MT
73808diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73809index 56faf31..862c072 100644
73810--- a/mm/vmalloc.c
73811+++ b/mm/vmalloc.c
73812@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
ae4e228f
MT
73813
73814 pte = pte_offset_kernel(pmd, addr);
73815 do {
73816- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73817- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73818+
73819+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73820+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73821+ BUG_ON(!pte_exec(*pte));
73822+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73823+ continue;
73824+ }
73825+#endif
73826+
73827+ {
73828+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73829+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73830+ }
73831 } while (pte++, addr += PAGE_SIZE, addr != end);
73832 }
73833
fe2de317 73834@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
58c5fc13
MT
73835 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73836 {
73837 pte_t *pte;
73838+ int ret = -ENOMEM;
58c5fc13
MT
73839
73840 /*
73841 * nr is a running index into the array which helps higher level
fe2de317 73842@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
58c5fc13
MT
73843 pte = pte_alloc_kernel(pmd, addr);
73844 if (!pte)
73845 return -ENOMEM;
73846+
ae4e228f 73847+ pax_open_kernel();
58c5fc13
MT
73848 do {
73849 struct page *page = pages[*nr];
73850
73851- if (WARN_ON(!pte_none(*pte)))
73852- return -EBUSY;
73853- if (WARN_ON(!page))
73854- return -ENOMEM;
ae4e228f 73855+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57199397 73856+ if (pgprot_val(prot) & _PAGE_NX)
ae4e228f
MT
73857+#endif
73858+
58c5fc13
MT
73859+ if (WARN_ON(!pte_none(*pte))) {
73860+ ret = -EBUSY;
73861+ goto out;
73862+ }
73863+ if (WARN_ON(!page)) {
73864+ ret = -ENOMEM;
73865+ goto out;
73866+ }
73867 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73868 (*nr)++;
73869 } while (pte++, addr += PAGE_SIZE, addr != end);
73870- return 0;
73871+ ret = 0;
73872+out:
ae4e228f
MT
73873+ pax_close_kernel();
73874+ return ret;
73875 }
73876
73877 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
fe2de317 73878@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
ae4e228f
MT
73879 * and fall back on vmalloc() if that fails. Others
73880 * just put it in the vmalloc space.
73881 */
73882-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73883+#ifdef CONFIG_MODULES
73884+#ifdef MODULES_VADDR
73885 unsigned long addr = (unsigned long)x;
73886 if (addr >= MODULES_VADDR && addr < MODULES_END)
73887 return 1;
73888 #endif
58c5fc13 73889+
ae4e228f
MT
73890+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73891+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73892+ return 1;
58c5fc13
MT
73893+#endif
73894+
ae4e228f
MT
73895+#endif
73896+
73897 return is_vmalloc_addr(x);
58c5fc13
MT
73898 }
73899
fe2de317 73900@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
57199397
MT
73901
73902 if (!pgd_none(*pgd)) {
73903 pud_t *pud = pud_offset(pgd, addr);
73904+#ifdef CONFIG_X86
73905+ if (!pud_large(*pud))
73906+#endif
73907 if (!pud_none(*pud)) {
73908 pmd_t *pmd = pmd_offset(pud, addr);
73909+#ifdef CONFIG_X86
73910+ if (!pmd_large(*pmd))
73911+#endif
73912 if (!pmd_none(*pmd)) {
73913 pte_t *ptep, pte;
73914
fe2de317 73915@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
ae4e228f 73916 struct vm_struct *area;
58c5fc13
MT
73917
73918 BUG_ON(in_interrupt());
73919+
df50ba0c 73920+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58c5fc13
MT
73921+ if (flags & VM_KERNEXEC) {
73922+ if (start != VMALLOC_START || end != VMALLOC_END)
73923+ return NULL;
df50ba0c
MT
73924+ start = (unsigned long)MODULES_EXEC_VADDR;
73925+ end = (unsigned long)MODULES_EXEC_END;
58c5fc13
MT
73926+ }
73927+#endif
73928+
73929 if (flags & VM_IOREMAP) {
73930 int bit = fls(size);
73931
fe2de317 73932@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
ae4e228f 73933 if (count > totalram_pages)
58c5fc13
MT
73934 return NULL;
73935
df50ba0c 73936+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58c5fc13
MT
73937+ if (!(pgprot_val(prot) & _PAGE_NX))
73938+ flags |= VM_KERNEXEC;
73939+#endif
73940+
73941 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73942 __builtin_return_address(0));
73943 if (!area)
fe2de317 73944@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
ae4e228f 73945 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
58c5fc13
MT
73946 return NULL;
73947
df50ba0c 73948+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58c5fc13 73949+ if (!(pgprot_val(prot) & _PAGE_NX))
6e9df6a3
MT
73950+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73951+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
58c5fc13
MT
73952+ else
73953+#endif
73954+
6e9df6a3
MT
73955 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73956 start, end, node, gfp_mask, caller);
58c5fc13 73957
fe2de317
MT
73958@@ -1634,6 +1696,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73959 return NULL;
73960
73961 addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
73962+ if (!addr)
73963+ return NULL;
73964
73965 /*
73966 * In this function, newly allocated vm_struct is not added
73967@@ -1672,6 +1736,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
16454cff 73968 gfp_mask, prot, node, caller);
58c5fc13
MT
73969 }
73970
73971+#undef __vmalloc
73972 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
73973 {
ae4e228f 73974 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
fe2de317 73975@@ -1695,6 +1760,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
58c5fc13
MT
73976 * For tight control over page level allocator and protection flags
73977 * use __vmalloc() instead.
73978 */
73979+#undef vmalloc
73980 void *vmalloc(unsigned long size)
73981 {
bc901d79 73982 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
fe2de317 73983@@ -1711,6 +1777,7 @@ EXPORT_SYMBOL(vmalloc);
bc901d79
MT
73984 * For tight control over page level allocator and protection flags
73985 * use __vmalloc() instead.
73986 */
73987+#undef vzalloc
73988 void *vzalloc(unsigned long size)
73989 {
73990 return __vmalloc_node_flags(size, -1,
fe2de317 73991@@ -1725,6 +1792,7 @@ EXPORT_SYMBOL(vzalloc);
58c5fc13
MT
73992 * The resulting memory area is zeroed so it can be mapped to userspace
73993 * without leaking data.
73994 */
73995+#undef vmalloc_user
73996 void *vmalloc_user(unsigned long size)
73997 {
73998 struct vm_struct *area;
fe2de317 73999@@ -1752,6 +1820,7 @@ EXPORT_SYMBOL(vmalloc_user);
58c5fc13
MT
74000 * For tight control over page level allocator and protection flags
74001 * use __vmalloc() instead.
74002 */
74003+#undef vmalloc_node
74004 void *vmalloc_node(unsigned long size, int node)
74005 {
ae4e228f 74006 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
fe2de317 74007@@ -1771,6 +1840,7 @@ EXPORT_SYMBOL(vmalloc_node);
bc901d79
MT
74008 * For tight control over page level allocator and protection flags
74009 * use __vmalloc_node() instead.
74010 */
74011+#undef vzalloc_node
74012 void *vzalloc_node(unsigned long size, int node)
74013 {
74014 return __vmalloc_node_flags(size, node,
fe2de317 74015@@ -1793,10 +1863,10 @@ EXPORT_SYMBOL(vzalloc_node);
58c5fc13
MT
74016 * For tight control over page level allocator and protection flags
74017 * use __vmalloc() instead.
74018 */
74019-
74020+#undef vmalloc_exec
74021 void *vmalloc_exec(unsigned long size)
74022 {
ae4e228f
MT
74023- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74024+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
58c5fc13
MT
74025 -1, __builtin_return_address(0));
74026 }
74027
fe2de317 74028@@ -1815,6 +1885,7 @@ void *vmalloc_exec(unsigned long size)
58c5fc13
MT
74029 * Allocate enough 32bit PA addressable pages to cover @size from the
74030 * page level allocator and map them into contiguous kernel virtual space.
74031 */
74032+#undef vmalloc_32
74033 void *vmalloc_32(unsigned long size)
74034 {
ae4e228f 74035 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
fe2de317 74036@@ -1829,6 +1900,7 @@ EXPORT_SYMBOL(vmalloc_32);
58c5fc13
MT
74037 * The resulting memory area is 32bit addressable and zeroed so it can be
74038 * mapped to userspace without leaking data.
74039 */
74040+#undef vmalloc_32_user
74041 void *vmalloc_32_user(unsigned long size)
74042 {
74043 struct vm_struct *area;
fe2de317 74044@@ -2091,6 +2163,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
6892158b
MT
74045 unsigned long uaddr = vma->vm_start;
74046 unsigned long usize = vma->vm_end - vma->vm_start;
74047
74048+ BUG_ON(vma->vm_mirror);
74049+
74050 if ((PAGE_SIZE-1) & (unsigned long)addr)
74051 return -EINVAL;
74052
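The vmalloc changes above route executable mappings into the module address range when KERNEXEC is enabled: __get_vm_area_node() rewrites the requested [start, end) window to [MODULES_EXEC_VADDR, MODULES_EXEC_END) whenever VM_KERNEXEC is set, and vmap()/__vmalloc_node_range() set that flag for any protection that lacks _PAGE_NX. A stripped-down sketch of just the window selection (all constants here are placeholders):

struct vm_window { unsigned long start, end; };

#define VM_KERNEXEC_SKETCH 0x100

/* executable requests are forced into the (smaller) module area so the
 * rest of vmalloc space can stay non-executable */
static struct vm_window pick_window(unsigned long flags,
                                    unsigned long vmalloc_start,
                                    unsigned long vmalloc_end,
                                    unsigned long mod_exec_start,
                                    unsigned long mod_exec_end)
{
    struct vm_window w = { vmalloc_start, vmalloc_end };

    if (flags & VM_KERNEXEC_SKETCH) {
        w.start = mod_exec_start;
        w.end = mod_exec_end;
    }
    return w;
}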
fe2de317
MT
74053diff --git a/mm/vmstat.c b/mm/vmstat.c
74054index d52b13d..381d1ac 100644
74055--- a/mm/vmstat.c
74056+++ b/mm/vmstat.c
bc901d79 74057@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
57199397
MT
74058 *
74059 * vm_stat contains the global counters
74060 */
74061-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
74062+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
74063 EXPORT_SYMBOL(vm_stat);
74064
74065 #ifdef CONFIG_SMP
66a7e928 74066@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
57199397
MT
74067 v = p->vm_stat_diff[i];
74068 p->vm_stat_diff[i] = 0;
74069 local_irq_restore(flags);
74070- atomic_long_add(v, &zone->vm_stat[i]);
74071+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74072 global_diff[i] += v;
74073 #ifdef CONFIG_NUMA
74074 /* 3 seconds idle till flush */
66a7e928 74075@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
57199397
MT
74076
74077 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74078 if (global_diff[i])
74079- atomic_long_add(global_diff[i], &vm_stat[i]);
74080+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74081 }
74082
74083 #endif
15a11c5b 74084@@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
57199397
MT
74085 start_cpu_timer(cpu);
74086 #endif
74087 #ifdef CONFIG_PROC_FS
74088- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74089- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74090- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74091- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74092+ {
74093+ mode_t gr_mode = S_IRUGO;
74094+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74095+ gr_mode = S_IRUSR;
74096+#endif
74097+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74098+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
bc901d79
MT
74099+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74100+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74101+#else
57199397 74102+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
bc901d79 74103+#endif
57199397
MT
74104+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74105+ }
74106 #endif
74107 return 0;
74108 }
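The same /proc hardening idiom recurs in the slab, slub and vmstat hunks: the file mode is computed once, defaulting to world-readable and dropping to owner-only under GRKERNSEC_PROC_ADD, with group read optionally re-added for vmstat under GRKERNSEC_PROC_USERGROUP. Reduced to its essence (userspace constants from sys/stat.h used for illustration):

#include <sys/stat.h>

/* returns the permission bits used when creating a /proc entry */
static mode_t proc_mode(int restrict_proc, int allow_group)
{
    mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;   /* S_IRUGO */

    if (restrict_proc) {
        mode = S_IRUSR;                          /* owner (root) only */
        if (allow_group)
            mode |= S_IRGRP;                     /* proc group may read */
    }
    return mode;
}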
fe2de317
MT
74109diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74110index 8970ba1..e3361fe 100644
74111--- a/net/8021q/vlan.c
74112+++ b/net/8021q/vlan.c
74113@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
df50ba0c
MT
74114 err = -EPERM;
74115 if (!capable(CAP_NET_ADMIN))
74116 break;
74117- if ((args.u.name_type >= 0) &&
74118- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74119+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74120 struct vlan_net *vn;
74121
74122 vn = net_generic(net, vlan_net_id);
fe2de317
MT
74123diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74124index fdfdb57..38d368c 100644
74125--- a/net/9p/trans_fd.c
74126+++ b/net/9p/trans_fd.c
74127@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
6e9df6a3
MT
74128 oldfs = get_fs();
74129 set_fs(get_ds());
74130 /* The cast to a user pointer is valid due to the set_fs() */
74131- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74132+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74133 set_fs(oldfs);
74134
74135 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
fe2de317
MT
74136diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
74137index e317583..3c8aeaf 100644
74138--- a/net/9p/trans_virtio.c
74139+++ b/net/9p/trans_virtio.c
6e9df6a3
MT
74140@@ -327,7 +327,7 @@ req_retry_pinned:
74141 } else {
74142 char *pbuf;
74143 if (req->tc->pubuf)
74144- pbuf = (__force char *) req->tc->pubuf;
74145+ pbuf = (char __force_kernel *) req->tc->pubuf;
74146 else
74147 pbuf = req->tc->pkbuf;
74148 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
74149@@ -357,7 +357,7 @@ req_retry_pinned:
74150 } else {
74151 char *pbuf;
74152 if (req->tc->pubuf)
74153- pbuf = (__force char *) req->tc->pubuf;
74154+ pbuf = (char __force_kernel *) req->tc->pubuf;
74155 else
74156 pbuf = req->tc->pkbuf;
74157
fe2de317
MT
74158diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74159index f41f026..fe76ea8 100644
74160--- a/net/atm/atm_misc.c
74161+++ b/net/atm/atm_misc.c
74162@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
58c5fc13
MT
74163 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74164 return 1;
df50ba0c 74165 atm_return(vcc, truesize);
58c5fc13
MT
74166- atomic_inc(&vcc->stats->rx_drop);
74167+ atomic_inc_unchecked(&vcc->stats->rx_drop);
74168 return 0;
74169 }
df50ba0c 74170 EXPORT_SYMBOL(atm_charge);
fe2de317 74171@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
58c5fc13
MT
74172 }
74173 }
df50ba0c 74174 atm_return(vcc, guess);
58c5fc13
MT
74175- atomic_inc(&vcc->stats->rx_drop);
74176+ atomic_inc_unchecked(&vcc->stats->rx_drop);
74177 return NULL;
74178 }
df50ba0c
MT
74179 EXPORT_SYMBOL(atm_alloc_charge);
74180@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
58c5fc13 74181
df50ba0c 74182 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
58c5fc13
MT
74183 {
74184-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74185+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74186 __SONET_ITEMS
74187 #undef __HANDLE_ITEM
74188 }
df50ba0c 74189@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
58c5fc13 74190
df50ba0c 74191 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
58c5fc13 74192 {
df50ba0c 74193-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
58c5fc13
MT
74194+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74195 __SONET_ITEMS
74196 #undef __HANDLE_ITEM
74197 }
fe2de317
MT
74198diff --git a/net/atm/lec.h b/net/atm/lec.h
74199index dfc0719..47c5322 100644
74200--- a/net/atm/lec.h
74201+++ b/net/atm/lec.h
15a11c5b
MT
74202@@ -48,7 +48,7 @@ struct lane2_ops {
74203 const u8 *tlvs, u32 sizeoftlvs);
74204 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74205 const u8 *tlvs, u32 sizeoftlvs);
74206-};
74207+} __no_const;
74208
74209 /*
74210 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
fe2de317
MT
74211diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74212index 0919a88..a23d54e 100644
74213--- a/net/atm/mpc.h
74214+++ b/net/atm/mpc.h
15a11c5b
MT
74215@@ -33,7 +33,7 @@ struct mpoa_client {
74216 struct mpc_parameters parameters; /* parameters for this client */
74217
74218 const struct net_device_ops *old_ops;
74219- struct net_device_ops new_ops;
74220+ net_device_ops_no_const new_ops;
74221 };
74222
74223
fe2de317
MT
74224diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
74225index d1b2d9a..7cc2219 100644
74226--- a/net/atm/mpoa_caches.c
74227+++ b/net/atm/mpoa_caches.c
74228@@ -255,6 +255,8 @@ static void check_resolving_entries(struct mpoa_client *client)
66a7e928
MT
74229 struct timeval now;
74230 struct k_message msg;
74231
74232+ pax_track_stack();
74233+
74234 do_gettimeofday(&now);
74235
74236 read_lock_bh(&client->ingress_lock);
fe2de317
MT
74237diff --git a/net/atm/proc.c b/net/atm/proc.c
74238index 0d020de..011c7bb 100644
74239--- a/net/atm/proc.c
74240+++ b/net/atm/proc.c
74241@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
58c5fc13
MT
74242 const struct k_atm_aal_stats *stats)
74243 {
74244 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
df50ba0c
MT
74245- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74246- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74247- atomic_read(&stats->rx_drop));
74248+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74249+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74250+ atomic_read_unchecked(&stats->rx_drop));
58c5fc13
MT
74251 }
74252
74253 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
fe2de317
MT
74254diff --git a/net/atm/resources.c b/net/atm/resources.c
74255index 23f45ce..c748f1a 100644
74256--- a/net/atm/resources.c
74257+++ b/net/atm/resources.c
bc901d79 74258@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
58c5fc13
MT
74259 static void copy_aal_stats(struct k_atm_aal_stats *from,
74260 struct atm_aal_stats *to)
74261 {
74262-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74263+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74264 __AAL_STAT_ITEMS
74265 #undef __HANDLE_ITEM
74266 }
fe2de317 74267@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
58c5fc13
MT
74268 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74269 struct atm_aal_stats *to)
74270 {
74271-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74272+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74273 __AAL_STAT_ITEMS
74274 #undef __HANDLE_ITEM
74275 }
fe2de317
MT
74276diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74277index db7aacf..991e539 100644
74278--- a/net/batman-adv/hard-interface.c
74279+++ b/net/batman-adv/hard-interface.c
74280@@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
66a7e928
MT
74281 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74282 dev_add_pack(&hard_iface->batman_adv_ptype);
74283
74284- atomic_set(&hard_iface->seqno, 1);
74285- atomic_set(&hard_iface->frag_seqno, 1);
74286+ atomic_set_unchecked(&hard_iface->seqno, 1);
74287+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74288 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74289 hard_iface->net_dev->name);
74290
fe2de317
MT
74291diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
74292index 0f32c81..82d1895 100644
74293--- a/net/batman-adv/routing.c
74294+++ b/net/batman-adv/routing.c
74295@@ -656,7 +656,7 @@ void receive_bat_packet(const struct ethhdr *ethhdr,
8308f9c9
MT
74296 return;
74297
74298 /* could be changed by schedule_own_packet() */
74299- if_incoming_seqno = atomic_read(&if_incoming->seqno);
74300+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74301
74302 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
74303
fe2de317
MT
74304diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
74305index 58d1447..2a66c8c 100644
74306--- a/net/batman-adv/send.c
74307+++ b/net/batman-adv/send.c
74308@@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
8308f9c9
MT
74309
74310 /* change sequence number to network order */
74311 batman_packet->seqno =
66a7e928
MT
74312- htonl((uint32_t)atomic_read(&hard_iface->seqno));
74313+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
8308f9c9 74314
6e9df6a3
MT
74315 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
74316 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
fe2de317 74317@@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
8308f9c9 74318 else
6e9df6a3 74319 batman_packet->gw_flags = NO_FLAGS;
8308f9c9 74320
66a7e928
MT
74321- atomic_inc(&hard_iface->seqno);
74322+ atomic_inc_unchecked(&hard_iface->seqno);
8308f9c9 74323
66a7e928 74324 slide_own_bcast_window(hard_iface);
8308f9c9 74325 send_time = own_send_time(bat_priv);
fe2de317
MT
74326diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74327index 05dd351..2ecd19b 100644
74328--- a/net/batman-adv/soft-interface.c
74329+++ b/net/batman-adv/soft-interface.c
74330@@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
8308f9c9
MT
74331
74332 /* set broadcast sequence number */
74333 bcast_packet->seqno =
74334- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74335+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74336
6e9df6a3 74337 add_bcast_packet_to_list(bat_priv, skb, 1);
8308f9c9 74338
fe2de317 74339@@ -824,7 +824,7 @@ struct net_device *softif_create(const char *name)
8308f9c9
MT
74340 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74341
74342 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74343- atomic_set(&bat_priv->bcast_seqno, 1);
74344+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
6e9df6a3
MT
74345 atomic_set(&bat_priv->ttvn, 0);
74346 atomic_set(&bat_priv->tt_local_changes, 0);
74347 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
fe2de317
MT
74348diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74349index 51a0db7..b8a62be 100644
74350--- a/net/batman-adv/types.h
74351+++ b/net/batman-adv/types.h
66a7e928 74352@@ -38,8 +38,8 @@ struct hard_iface {
8308f9c9
MT
74353 int16_t if_num;
74354 char if_status;
74355 struct net_device *net_dev;
74356- atomic_t seqno;
74357- atomic_t frag_seqno;
74358+ atomic_unchecked_t seqno;
74359+ atomic_unchecked_t frag_seqno;
74360 unsigned char *packet_buff;
74361 int packet_len;
74362 struct kobject *hardif_obj;
6e9df6a3 74363@@ -153,7 +153,7 @@ struct bat_priv {
8308f9c9
MT
74364 atomic_t orig_interval; /* uint */
74365 atomic_t hop_penalty; /* uint */
74366 atomic_t log_level; /* uint */
74367- atomic_t bcast_seqno;
74368+ atomic_unchecked_t bcast_seqno;
74369 atomic_t bcast_queue_left;
74370 atomic_t batman_queue_left;
6e9df6a3 74371 atomic_t ttvn; /* tranlation table version number */
fe2de317
MT
74372diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74373index 32b125f..f1447e0 100644
74374--- a/net/batman-adv/unicast.c
74375+++ b/net/batman-adv/unicast.c
74376@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
66a7e928
MT
74377 frag1->flags = UNI_FRAG_HEAD | large_tail;
74378 frag2->flags = large_tail;
74379
74380- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74381+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74382 frag1->seqno = htons(seqno - 1);
74383 frag2->seqno = htons(seqno);
74384
fe2de317
MT
74385diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74386index ea7f031..0615edc 100644
74387--- a/net/bluetooth/hci_conn.c
74388+++ b/net/bluetooth/hci_conn.c
74389@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
6e9df6a3
MT
74390 cp.handle = cpu_to_le16(conn->handle);
74391 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74392 cp.ediv = ediv;
74393- memcpy(cp.rand, rand, sizeof(rand));
74394+ memcpy(cp.rand, rand, sizeof(cp.rand));
74395
74396 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
74397 }
fe2de317 74398@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
6e9df6a3
MT
74399 memset(&cp, 0, sizeof(cp));
74400
74401 cp.handle = cpu_to_le16(conn->handle);
74402- memcpy(cp.ltk, ltk, sizeof(ltk));
74403+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74404
74405 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74406 }
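The two Bluetooth fixes above correct a classic C pitfall: inside hci_le_start_enc() and hci_le_ltk_reply() the parameters rand[8] and ltk[16] decay to pointers, so sizeof(rand) and sizeof(ltk) yield the pointer size rather than 8 or 16, and the memcpy can copy the wrong number of bytes; taking sizeof of the destination struct member gives the intended length. A minimal demonstration of the decay:

#include <stdio.h>

static void show(unsigned char key[16])
{
    /* 'key' is really 'unsigned char *', so this prints sizeof(void *) */
    printf("sizeof(key) inside function: %zu\n", sizeof(key));
}

int main(void)
{
    unsigned char ltk[16];

    printf("sizeof(ltk) at definition: %zu\n", sizeof(ltk)); /* 16 */
    show(ltk);
    return 0;
}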
fe2de317
MT
74407diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
74408index e79ff75..215b57d 100644
74409--- a/net/bridge/br_multicast.c
74410+++ b/net/bridge/br_multicast.c
74411@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
6892158b
MT
74412 nexthdr = ip6h->nexthdr;
74413 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
74414
74415- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
74416+ if (nexthdr != IPPROTO_ICMPV6)
74417 return 0;
74418
74419 /* Okay, we found ICMPv6 header */
fe2de317
MT
74420diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74421index 5864cc4..94cab18 100644
74422--- a/net/bridge/netfilter/ebtables.c
74423+++ b/net/bridge/netfilter/ebtables.c
74424@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ae4e228f
MT
74425 tmp.valid_hooks = t->table->valid_hooks;
74426 }
74427 mutex_unlock(&ebt_mutex);
74428- if (copy_to_user(user, &tmp, *len) != 0){
74429+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74430 BUGPRINT("c2u Didn't work\n");
74431 ret = -EFAULT;
74432 break;
fe2de317 74433@@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_user(struct ebt_table *t,
66a7e928
MT
74434 int ret;
74435 void __user *pos;
74436
74437+ pax_track_stack();
74438+
74439 memset(&tinfo, 0, sizeof(tinfo));
74440
74441 if (cmd == EBT_SO_GET_ENTRIES) {
fe2de317
MT
74442diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
74443index a986280..13444a1 100644
74444--- a/net/caif/caif_socket.c
74445+++ b/net/caif/caif_socket.c
15a11c5b 74446@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
8308f9c9
MT
74447 #ifdef CONFIG_DEBUG_FS
74448 struct debug_fs_counter {
74449 atomic_t caif_nr_socks;
15a11c5b 74450- atomic_t caif_sock_create;
8308f9c9
MT
74451- atomic_t num_connect_req;
74452- atomic_t num_connect_resp;
74453- atomic_t num_connect_fail_resp;
74454- atomic_t num_disconnect;
74455- atomic_t num_remote_shutdown_ind;
74456- atomic_t num_tx_flow_off_ind;
74457- atomic_t num_tx_flow_on_ind;
74458- atomic_t num_rx_flow_off;
74459- atomic_t num_rx_flow_on;
15a11c5b 74460+ atomic_unchecked_t caif_sock_create;
8308f9c9
MT
74461+ atomic_unchecked_t num_connect_req;
74462+ atomic_unchecked_t num_connect_resp;
74463+ atomic_unchecked_t num_connect_fail_resp;
74464+ atomic_unchecked_t num_disconnect;
74465+ atomic_unchecked_t num_remote_shutdown_ind;
74466+ atomic_unchecked_t num_tx_flow_off_ind;
74467+ atomic_unchecked_t num_tx_flow_on_ind;
74468+ atomic_unchecked_t num_rx_flow_off;
74469+ atomic_unchecked_t num_rx_flow_on;
74470 };
74471 static struct debug_fs_counter cnt;
15a11c5b
MT
74472 #define dbfs_atomic_inc(v) atomic_inc_return(v)
74473+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
74474 #define dbfs_atomic_dec(v) atomic_dec_return(v)
8308f9c9 74475 #else
15a11c5b 74476 #define dbfs_atomic_inc(v) 0
fe2de317 74477@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
15a11c5b
MT
74478 atomic_read(&cf_sk->sk.sk_rmem_alloc),
74479 sk_rcvbuf_lowwater(cf_sk));
8308f9c9
MT
74480 set_rx_flow_off(cf_sk);
74481- dbfs_atomic_inc(&cnt.num_rx_flow_off);
74482+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74483 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74484 }
74485
fe2de317 74486@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9 74487 set_rx_flow_off(cf_sk);
15a11c5b
MT
74488 if (net_ratelimit())
74489 pr_debug("sending flow OFF due to rmem_schedule\n");
8308f9c9
MT
74490- dbfs_atomic_inc(&cnt.num_rx_flow_off);
74491+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74492 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74493 }
74494 skb->dev = NULL;
fe2de317 74495@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
8308f9c9
MT
74496 switch (flow) {
74497 case CAIF_CTRLCMD_FLOW_ON_IND:
74498 /* OK from modem to start sending again */
74499- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
74500+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
74501 set_tx_flow_on(cf_sk);
74502 cf_sk->sk.sk_state_change(&cf_sk->sk);
74503 break;
74504
74505 case CAIF_CTRLCMD_FLOW_OFF_IND:
74506 /* Modem asks us to shut up */
74507- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
74508+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
74509 set_tx_flow_off(cf_sk);
74510 cf_sk->sk.sk_state_change(&cf_sk->sk);
74511 break;
fe2de317 74512@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
8308f9c9 74513 /* We're now connected */
15a11c5b
MT
74514 caif_client_register_refcnt(&cf_sk->layer,
74515 cfsk_hold, cfsk_put);
8308f9c9
MT
74516- dbfs_atomic_inc(&cnt.num_connect_resp);
74517+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
74518 cf_sk->sk.sk_state = CAIF_CONNECTED;
74519 set_tx_flow_on(cf_sk);
74520 cf_sk->sk.sk_state_change(&cf_sk->sk);
fe2de317 74521@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
8308f9c9
MT
74522
74523 case CAIF_CTRLCMD_INIT_FAIL_RSP:
74524 /* Connect request failed */
74525- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
74526+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
74527 cf_sk->sk.sk_err = ECONNREFUSED;
74528 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
74529 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
fe2de317 74530@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
8308f9c9
MT
74531
74532 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
74533 /* Modem has closed this connection, or device is down. */
74534- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
74535+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
74536 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74537 cf_sk->sk.sk_err = ECONNRESET;
74538 set_rx_flow_on(cf_sk);
fe2de317 74539@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
8308f9c9
MT
74540 return;
74541
74542 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
74543- dbfs_atomic_inc(&cnt.num_rx_flow_on);
74544+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
74545 set_rx_flow_on(cf_sk);
74546 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
74547 }
fe2de317 74548@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
8308f9c9
MT
74549 /*ifindex = id of the interface.*/
74550 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
74551
74552- dbfs_atomic_inc(&cnt.num_connect_req);
74553+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
74554 cf_sk->layer.receive = caif_sktrecv_cb;
15a11c5b
MT
74555
74556 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
fe2de317 74557@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
15a11c5b 74558 spin_unlock_bh(&sk->sk_receive_queue.lock);
8308f9c9
MT
74559 sock->sk = NULL;
74560
74561- dbfs_atomic_inc(&cnt.num_disconnect);
74562+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
74563
15a11c5b 74564 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
8308f9c9 74565 if (cf_sk->debugfs_socket_dir != NULL)
fe2de317 74566@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
15a11c5b
MT
74567 cf_sk->conn_req.protocol = protocol;
74568 /* Increase the number of sockets created. */
74569 dbfs_atomic_inc(&cnt.caif_nr_socks);
74570- num = dbfs_atomic_inc(&cnt.caif_sock_create);
74571+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
74572 #ifdef CONFIG_DEBUG_FS
74573 if (!IS_ERR(debugfsdir)) {
74574
fe2de317
MT
74575diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74576index e22671b..6598ea0 100644
74577--- a/net/caif/cfctrl.c
74578+++ b/net/caif/cfctrl.c
66a7e928
MT
74579@@ -9,6 +9,7 @@
74580 #include <linux/stddef.h>
74581 #include <linux/spinlock.h>
74582 #include <linux/slab.h>
74583+#include <linux/sched.h>
74584 #include <net/caif/caif_layer.h>
74585 #include <net/caif/cfpkt.h>
74586 #include <net/caif/cfctrl.h>
15a11c5b 74587@@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
8308f9c9
MT
74588 dev_info.id = 0xff;
74589 memset(this, 0, sizeof(*this));
74590 cfsrvl_init(&this->serv, 0, &dev_info, false);
74591- atomic_set(&this->req_seq_no, 1);
74592- atomic_set(&this->rsp_seq_no, 1);
74593+ atomic_set_unchecked(&this->req_seq_no, 1);
74594+ atomic_set_unchecked(&this->rsp_seq_no, 1);
74595 this->serv.layer.receive = cfctrl_recv;
74596 sprintf(this->serv.layer.name, "ctrl");
74597 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
fe2de317 74598@@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
8308f9c9
MT
74599 struct cfctrl_request_info *req)
74600 {
15a11c5b 74601 spin_lock_bh(&ctrl->info_list_lock);
8308f9c9
MT
74602- atomic_inc(&ctrl->req_seq_no);
74603- req->sequence_no = atomic_read(&ctrl->req_seq_no);
74604+ atomic_inc_unchecked(&ctrl->req_seq_no);
74605+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74606 list_add_tail(&req->list, &ctrl->list);
15a11c5b 74607 spin_unlock_bh(&ctrl->info_list_lock);
8308f9c9 74608 }
fe2de317 74609@@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
8308f9c9
MT
74610 if (p != first)
74611 pr_warn("Requests are not received in order\n");
74612
74613- atomic_set(&ctrl->rsp_seq_no,
74614+ atomic_set_unchecked(&ctrl->rsp_seq_no,
74615 p->sequence_no);
74616 list_del(&p->list);
74617 goto out;
fe2de317 74618@@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
66a7e928
MT
74619 struct cfctrl *cfctrl = container_obj(layer);
74620 struct cfctrl_request_info rsp, *req;
74621
74622+ pax_track_stack();
74623
74624 cfpkt_extr_head(pkt, &cmdrsp, 1);
74625 cmd = cmdrsp & CFCTRL_CMD_MASK;
fe2de317
MT
74626diff --git a/net/compat.c b/net/compat.c
74627index c578d93..257fab7 100644
74628--- a/net/compat.c
74629+++ b/net/compat.c
74630@@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
6e9df6a3
MT
74631 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74632 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74633 return -EFAULT;
74634- kmsg->msg_name = compat_ptr(tmp1);
74635- kmsg->msg_iov = compat_ptr(tmp2);
74636- kmsg->msg_control = compat_ptr(tmp3);
74637+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74638+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74639+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74640 return 0;
74641 }
74642
fe2de317 74643@@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
6e9df6a3
MT
74644
74645 if (kern_msg->msg_namelen) {
74646 if (mode == VERIFY_READ) {
74647- int err = move_addr_to_kernel(kern_msg->msg_name,
74648+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74649 kern_msg->msg_namelen,
74650 kern_address);
74651 if (err < 0)
fe2de317 74652@@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
6e9df6a3
MT
74653 kern_msg->msg_name = NULL;
74654
74655 tot_len = iov_from_user_compat_to_kern(kern_iov,
74656- (struct compat_iovec __user *)kern_msg->msg_iov,
74657+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
74658 kern_msg->msg_iovlen);
74659 if (tot_len >= 0)
74660 kern_msg->msg_iov = kern_iov;
fe2de317 74661@@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
6e9df6a3
MT
74662
74663 #define CMSG_COMPAT_FIRSTHDR(msg) \
74664 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74665- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74666+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74667 (struct compat_cmsghdr __user *)NULL)
74668
74669 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74670 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74671 (ucmlen) <= (unsigned long) \
74672 ((mhdr)->msg_controllen - \
74673- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74674+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74675
74676 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74677 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74678 {
74679 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74680- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74681+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74682 msg->msg_controllen)
74683 return NULL;
74684 return (struct compat_cmsghdr __user *)ptr;
fe2de317 74685@@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
6e9df6a3
MT
74686 {
74687 struct compat_timeval ctv;
74688 struct compat_timespec cts[3];
74689- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74690+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74691 struct compat_cmsghdr cmhdr;
74692 int cmlen;
74693
fe2de317 74694@@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
6e9df6a3
MT
74695
74696 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74697 {
74698- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74699+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74700 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74701 int fdnum = scm->fp->count;
74702 struct file **fp = scm->fp->fp;
fe2de317 74703@@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
6e9df6a3
MT
74704 return -EFAULT;
74705 old_fs = get_fs();
74706 set_fs(KERNEL_DS);
74707- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74708+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74709 set_fs(old_fs);
74710
74711 return err;
fe2de317 74712@@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
6e9df6a3
MT
74713 len = sizeof(ktime);
74714 old_fs = get_fs();
74715 set_fs(KERNEL_DS);
74716- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74717+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74718 set_fs(old_fs);
74719
74720 if (!err) {
fe2de317 74721@@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
74722 case MCAST_JOIN_GROUP:
74723 case MCAST_LEAVE_GROUP:
74724 {
74725- struct compat_group_req __user *gr32 = (void *)optval;
74726+ struct compat_group_req __user *gr32 = (void __user *)optval;
74727 struct group_req __user *kgr =
74728 compat_alloc_user_space(sizeof(struct group_req));
74729 u32 interface;
fe2de317 74730@@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
74731 case MCAST_BLOCK_SOURCE:
74732 case MCAST_UNBLOCK_SOURCE:
74733 {
74734- struct compat_group_source_req __user *gsr32 = (void *)optval;
74735+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74736 struct group_source_req __user *kgsr = compat_alloc_user_space(
74737 sizeof(struct group_source_req));
74738 u32 interface;
fe2de317 74739@@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
74740 }
74741 case MCAST_MSFILTER:
74742 {
74743- struct compat_group_filter __user *gf32 = (void *)optval;
74744+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74745 struct group_filter __user *kgf;
74746 u32 interface, fmode, numsrc;
74747
fe2de317 74748@@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
74749 char __user *optval, int __user *optlen,
74750 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74751 {
74752- struct compat_group_filter __user *gf32 = (void *)optval;
74753+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74754 struct group_filter __user *kgf;
74755 int __user *koptlen;
74756 u32 interface, fmode, numsrc;
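
The casts introduced across net/compat.c, net/core/iovec.c and net/core/scm.c swap bare (void *) and __force casts for __force_user and __force_kernel. These exist for sparse's address-space checking: __user pointers sit in a separate address space and cannot be dereferenced or mixed with kernel pointers without an explicit __force cast, a distinction that matters more once UDEREF enforces the user/kernel split at runtime. Below is a minimal sketch of how such annotations are wired up, modelled on include/linux/compiler.h; the __force_user/__force_kernel shorthands are assumed here to be simple combinations of __force with the target space.

    /* Only the sparse static checker ("make C=1") sees these attributes;
     * a normal compile defines them away. */
    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __kernel __attribute__((address_space(0)))
    # define __force  __attribute__((force))
    #else
    # define __user
    # define __kernel
    # define __force
    #endif

    #define __force_user   __force __user
    #define __force_kernel __force __kernel

    struct msg {
        void __user *control;   /* points into the user address space */
    };

    static void set_control(struct msg *m, void *kaddr)
    {
        /* Without __force, sparse warns about casting across address spaces. */
        m->control = (void __force_user *)kaddr;
    }

    int main(void)
    {
        struct msg m;
        int x;

        set_control(&m, &x);
        return 0;
    }
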
fe2de317
MT
74757diff --git a/net/core/datagram.c b/net/core/datagram.c
74758index 18ac112..fe95ed9 100644
74759--- a/net/core/datagram.c
74760+++ b/net/core/datagram.c
74761@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
8308f9c9
MT
74762 }
74763
74764 kfree_skb(skb);
74765- atomic_inc(&sk->sk_drops);
74766+ atomic_inc_unchecked(&sk->sk_drops);
74767 sk_mem_reclaim_partial(sk);
74768
74769 return err;
fe2de317
MT
74770diff --git a/net/core/dev.c b/net/core/dev.c
74771index ae5cf2d..2c950a1 100644
74772--- a/net/core/dev.c
74773+++ b/net/core/dev.c
74774@@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const char *name)
16454cff
MT
74775 if (no_module && capable(CAP_NET_ADMIN))
74776 no_module = request_module("netdev-%s", name);
74777 if (no_module && capable(CAP_SYS_MODULE)) {
71d190be
MT
74778+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74779+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
74780+#else
74781 if (!request_module("%s", name))
16454cff
MT
74782 pr_err("Loading kernel module for a network device "
74783 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
74784 "instead\n", name);
71d190be
MT
74785+#endif
74786 }
74787 }
74788 EXPORT_SYMBOL(dev_load);
fe2de317 74789@@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
66a7e928 74790
15a11c5b
MT
74791 struct dev_gso_cb {
74792 void (*destructor)(struct sk_buff *skb);
74793-};
74794+} __no_const;
66a7e928 74795
15a11c5b
MT
74796 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74797
6e9df6a3 74798@@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
ae4e228f
MT
74799 }
74800 EXPORT_SYMBOL(netif_rx_ni);
74801
74802-static void net_tx_action(struct softirq_action *h)
74803+static void net_tx_action(void)
74804 {
74805 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74806
fe2de317 74807@@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *napi)
57199397 74808 }
ae4e228f
MT
74809 EXPORT_SYMBOL(netif_napi_del);
74810
ae4e228f
MT
74811-static void net_rx_action(struct softirq_action *h)
74812+static void net_rx_action(void)
74813 {
57199397 74814 struct softnet_data *sd = &__get_cpu_var(softnet_data);
ae4e228f 74815 unsigned long time_limit = jiffies + 2;
fe2de317
MT
74816diff --git a/net/core/flow.c b/net/core/flow.c
74817index 555a456..de48421 100644
74818--- a/net/core/flow.c
74819+++ b/net/core/flow.c
6e9df6a3 74820@@ -61,7 +61,7 @@ struct flow_cache {
8308f9c9
MT
74821 struct timer_list rnd_timer;
74822 };
74823
74824-atomic_t flow_cache_genid = ATOMIC_INIT(0);
74825+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74826 EXPORT_SYMBOL(flow_cache_genid);
74827 static struct flow_cache flow_cache_global;
74828 static struct kmem_cache *flow_cachep __read_mostly;
fe2de317 74829@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
8308f9c9
MT
74830
74831 static int flow_entry_valid(struct flow_cache_entry *fle)
74832 {
74833- if (atomic_read(&flow_cache_genid) != fle->genid)
74834+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74835 return 0;
74836 if (fle->object && !fle->object->ops->check(fle->object))
74837 return 0;
fe2de317 74838@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
8308f9c9
MT
74839 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74840 fcp->hash_count++;
74841 }
74842- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74843+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74844 flo = fle->object;
74845 if (!flo)
74846 goto ret_object;
6e9df6a3 74847@@ -280,7 +280,7 @@ nocache:
8308f9c9
MT
74848 }
74849 flo = resolver(net, key, family, dir, flo, ctx);
74850 if (fle) {
74851- fle->genid = atomic_read(&flow_cache_genid);
74852+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
74853 if (!IS_ERR(flo))
74854 fle->object = flo;
74855 else
fe2de317
MT
74856diff --git a/net/core/iovec.c b/net/core/iovec.c
74857index c40f27e..7f49254 100644
74858--- a/net/core/iovec.c
74859+++ b/net/core/iovec.c
74860@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
6e9df6a3
MT
74861 if (m->msg_namelen) {
74862 if (mode == VERIFY_READ) {
74863 void __user *namep;
74864- namep = (void __user __force *) m->msg_name;
74865+ namep = (void __force_user *) m->msg_name;
74866 err = move_addr_to_kernel(namep, m->msg_namelen,
74867 address);
74868 if (err < 0)
fe2de317 74869@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
6e9df6a3
MT
74870 }
74871
74872 size = m->msg_iovlen * sizeof(struct iovec);
74873- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74874+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74875 return -EFAULT;
74876
74877 m->msg_iov = iov;
fe2de317
MT
74878diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74879index 99d9e95..209bae2 100644
74880--- a/net/core/rtnetlink.c
74881+++ b/net/core/rtnetlink.c
6e9df6a3 74882@@ -57,7 +57,7 @@ struct rtnl_link {
15a11c5b
MT
74883 rtnl_doit_func doit;
74884 rtnl_dumpit_func dumpit;
6e9df6a3 74885 rtnl_calcit_func calcit;
15a11c5b
MT
74886-};
74887+} __no_const;
74888
74889 static DEFINE_MUTEX(rtnl_mutex);
6e9df6a3 74890 static u16 min_ifinfo_dump_size;
fe2de317
MT
74891diff --git a/net/core/scm.c b/net/core/scm.c
74892index 811b53f..5d6c343 100644
74893--- a/net/core/scm.c
74894+++ b/net/core/scm.c
6e9df6a3
MT
74895@@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
74896 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74897 {
74898 struct cmsghdr __user *cm
74899- = (__force struct cmsghdr __user *)msg->msg_control;
74900+ = (struct cmsghdr __force_user *)msg->msg_control;
74901 struct cmsghdr cmhdr;
74902 int cmlen = CMSG_LEN(len);
74903 int err;
fe2de317 74904@@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
6e9df6a3
MT
74905 err = -EFAULT;
74906 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74907 goto out;
74908- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74909+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74910 goto out;
74911 cmlen = CMSG_SPACE(len);
74912 if (msg->msg_controllen < cmlen)
74913@@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
74914 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74915 {
74916 struct cmsghdr __user *cm
74917- = (__force struct cmsghdr __user*)msg->msg_control;
74918+ = (struct cmsghdr __force_user *)msg->msg_control;
74919
74920 int fdmax = 0;
74921 int fdnum = scm->fp->count;
fe2de317 74922@@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
6e9df6a3
MT
74923 if (fdnum < fdmax)
74924 fdmax = fdnum;
74925
74926- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74927+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74928 i++, cmfptr++)
74929 {
74930 int new_fd;
fe2de317
MT
74931diff --git a/net/core/skbuff.c b/net/core/skbuff.c
74932index 387703f..035abcf 100644
74933--- a/net/core/skbuff.c
74934+++ b/net/core/skbuff.c
74935@@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
66a7e928
MT
74936 struct sock *sk = skb->sk;
74937 int ret = 0;
74938
74939+ pax_track_stack();
74940+
74941 if (splice_grow_spd(pipe, &spd))
74942 return -ENOMEM;
74943
fe2de317
MT
74944diff --git a/net/core/sock.c b/net/core/sock.c
74945index 11d67b3..df26d4b 100644
74946--- a/net/core/sock.c
74947+++ b/net/core/sock.c
74948@@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
74949 */
74950 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
74951 (unsigned)sk->sk_rcvbuf) {
74952- atomic_inc(&sk->sk_drops);
74953+ atomic_inc_unchecked(&sk->sk_drops);
6e9df6a3 74954 trace_sock_rcvqueue_full(sk, skb);
8308f9c9
MT
74955 return -ENOMEM;
74956 }
fe2de317 74957@@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
74958 return err;
74959
74960 if (!sk_rmem_schedule(sk, skb->truesize)) {
74961- atomic_inc(&sk->sk_drops);
74962+ atomic_inc_unchecked(&sk->sk_drops);
74963 return -ENOBUFS;
74964 }
74965
fe2de317 74966@@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
74967 skb_dst_force(skb);
74968
74969 spin_lock_irqsave(&list->lock, flags);
74970- skb->dropcount = atomic_read(&sk->sk_drops);
74971+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74972 __skb_queue_tail(list, skb);
74973 spin_unlock_irqrestore(&list->lock, flags);
74974
fe2de317 74975@@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
8308f9c9
MT
74976 skb->dev = NULL;
74977
74978 if (sk_rcvqueues_full(sk, skb)) {
74979- atomic_inc(&sk->sk_drops);
74980+ atomic_inc_unchecked(&sk->sk_drops);
74981 goto discard_and_relse;
74982 }
74983 if (nested)
fe2de317 74984@@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
8308f9c9
MT
74985 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74986 } else if (sk_add_backlog(sk, skb)) {
74987 bh_unlock_sock(sk);
74988- atomic_inc(&sk->sk_drops);
74989+ atomic_inc_unchecked(&sk->sk_drops);
74990 goto discard_and_relse;
74991 }
74992
fe2de317 74993@@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
15a11c5b
MT
74994 if (len > sizeof(peercred))
74995 len = sizeof(peercred);
74996 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74997- if (copy_to_user(optval, &peercred, len))
74998+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74999 return -EFAULT;
75000 goto lenout;
75001 }
fe2de317 75002@@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
ae4e228f
MT
75003 return -ENOTCONN;
75004 if (lv < len)
75005 return -EINVAL;
75006- if (copy_to_user(optval, address, len))
75007+ if (len > sizeof(address) || copy_to_user(optval, address, len))
75008 return -EFAULT;
75009 goto lenout;
75010 }
fe2de317 75011@@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
ae4e228f
MT
75012
75013 if (len > lv)
75014 len = lv;
75015- if (copy_to_user(optval, &v, len))
75016+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
75017 return -EFAULT;
75018 lenout:
75019 if (put_user(len, optlen))
fe2de317 75020@@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
8308f9c9
MT
75021 */
75022 smp_wmb();
75023 atomic_set(&sk->sk_refcnt, 1);
75024- atomic_set(&sk->sk_drops, 0);
75025+ atomic_set_unchecked(&sk->sk_drops, 0);
75026 }
75027 EXPORT_SYMBOL(sock_init_data);
75028
fe2de317
MT
75029diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75030index 02e75d1..9a57a7c 100644
75031--- a/net/decnet/sysctl_net_decnet.c
75032+++ b/net/decnet/sysctl_net_decnet.c
75033@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
ae4e228f
MT
75034
75035 if (len > *lenp) len = *lenp;
75036
75037- if (copy_to_user(buffer, addr, len))
bc901d79 75038+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
ae4e228f
MT
75039 return -EFAULT;
75040
75041 *lenp = len;
fe2de317 75042@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
ae4e228f
MT
75043
75044 if (len > *lenp) len = *lenp;
75045
75046- if (copy_to_user(buffer, devname, len))
bc901d79 75047+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
ae4e228f
MT
75048 return -EFAULT;
75049
75050 *lenp = len;
fe2de317
MT
75051diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75052index 39a2d29..f39c0fe 100644
75053--- a/net/econet/Kconfig
75054+++ b/net/econet/Kconfig
bc901d79
MT
75055@@ -4,7 +4,7 @@
75056
75057 config ECONET
75058 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75059- depends on EXPERIMENTAL && INET
75060+ depends on EXPERIMENTAL && INET && BROKEN
75061 ---help---
75062 Econet is a fairly old and slow networking protocol mainly used by
75063 Acorn computers to access file and print servers. It uses native
fe2de317
MT
75064diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75065index 92fc5f6..b790d91 100644
75066--- a/net/ipv4/fib_frontend.c
75067+++ b/net/ipv4/fib_frontend.c
75068@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
66a7e928
MT
75069 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75070 fib_sync_up(dev);
75071 #endif
75072- atomic_inc(&net->ipv4.dev_addr_genid);
75073+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75074 rt_cache_flush(dev_net(dev), -1);
75075 break;
75076 case NETDEV_DOWN:
75077 fib_del_ifaddr(ifa, NULL);
75078- atomic_inc(&net->ipv4.dev_addr_genid);
75079+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75080 if (ifa->ifa_dev->ifa_list == NULL) {
75081 /* Last address was deleted from this interface.
75082 * Disable IP.
fe2de317 75083@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
66a7e928
MT
75084 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75085 fib_sync_up(dev);
75086 #endif
75087- atomic_inc(&net->ipv4.dev_addr_genid);
75088+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75089 rt_cache_flush(dev_net(dev), -1);
75090 break;
75091 case NETDEV_DOWN:
fe2de317
MT
75092diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75093index 80106d8..232e898 100644
75094--- a/net/ipv4/fib_semantics.c
75095+++ b/net/ipv4/fib_semantics.c
75096@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
66a7e928
MT
75097 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75098 nh->nh_gw,
75099 nh->nh_parent->fib_scope);
75100- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75101+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75102
75103 return nh->nh_saddr;
75104 }
fe2de317
MT
75105diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
75106index 389a2e6..ac1c1de 100644
75107--- a/net/ipv4/inet_diag.c
75108+++ b/net/ipv4/inet_diag.c
75109@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
bc901d79
MT
75110 r->idiag_retrans = 0;
75111
75112 r->id.idiag_if = sk->sk_bound_dev_if;
75113+
75114+#ifdef CONFIG_GRKERNSEC_HIDESYM
75115+ r->id.idiag_cookie[0] = 0;
75116+ r->id.idiag_cookie[1] = 0;
75117+#else
75118 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
75119 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75120+#endif
75121
75122 r->id.idiag_sport = inet->inet_sport;
75123 r->id.idiag_dport = inet->inet_dport;
fe2de317 75124@@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
bc901d79
MT
75125 r->idiag_family = tw->tw_family;
75126 r->idiag_retrans = 0;
75127 r->id.idiag_if = tw->tw_bound_dev_if;
75128+
75129+#ifdef CONFIG_GRKERNSEC_HIDESYM
75130+ r->id.idiag_cookie[0] = 0;
75131+ r->id.idiag_cookie[1] = 0;
75132+#else
75133 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
75134 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
75135+#endif
75136+
75137 r->id.idiag_sport = tw->tw_sport;
75138 r->id.idiag_dport = tw->tw_dport;
75139 r->id.idiag_src[0] = tw->tw_rcv_saddr;
fe2de317 75140@@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
bc901d79
MT
75141 if (sk == NULL)
75142 goto unlock;
75143
75144+#ifndef CONFIG_GRKERNSEC_HIDESYM
75145 err = -ESTALE;
75146 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
75147 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
75148 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
75149 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
75150 goto out;
75151+#endif
75152
75153 err = -ENOMEM;
75154 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
fe2de317 75155@@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
bc901d79
MT
75156 r->idiag_retrans = req->retrans;
75157
75158 r->id.idiag_if = sk->sk_bound_dev_if;
75159+
75160+#ifdef CONFIG_GRKERNSEC_HIDESYM
75161+ r->id.idiag_cookie[0] = 0;
75162+ r->id.idiag_cookie[1] = 0;
75163+#else
75164 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
75165 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
75166+#endif
75167
75168 tmo = req->expires - jiffies;
75169 if (tmo < 0)
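
All three inet_diag hunks above zero r->id.idiag_cookie under GRKERNSEC_HIDESYM, and the lookup path stops comparing the caller's cookie, because the cookie is nothing more than the kernel address of the sock or request split into two 32-bit words, i.e. a kernel pointer handed to any user of the sock-diag netlink interface. The encoding being suppressed is shown standalone below (a stack variable stands in for the kernel object).

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int object = 0;                       /* stand-in for a kernel struct sock */
        unsigned long addr = (unsigned long)&object;
        uint32_t cookie[2];

        /* Same split the original inet_diag code performs; the double shift
         * avoids undefined behaviour when unsigned long is only 32 bits wide. */
        cookie[0] = (uint32_t)addr;
        cookie[1] = (uint32_t)((addr >> 31) >> 1);

        /* The full address is trivially recovered from the two halves. */
        unsigned long leaked = ((unsigned long)cookie[1] << 31 << 1) | cookie[0];

        printf("address: %#lx\n", addr);
        printf("cookie : %#x %#x -> %#lx\n",
               (unsigned)cookie[0], (unsigned)cookie[1], leaked);
        return 0;
    }
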
fe2de317
MT
75170diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75171index 984ec65..97ac518 100644
75172--- a/net/ipv4/inet_hashtables.c
75173+++ b/net/ipv4/inet_hashtables.c
15a11c5b 75174@@ -18,12 +18,15 @@
58c5fc13
MT
75175 #include <linux/sched.h>
75176 #include <linux/slab.h>
75177 #include <linux/wait.h>
75178+#include <linux/security.h>
75179
75180 #include <net/inet_connection_sock.h>
75181 #include <net/inet_hashtables.h>
15a11c5b 75182 #include <net/secure_seq.h>
58c5fc13
MT
75183 #include <net/ip.h>
75184
75185+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75186+
75187 /*
75188 * Allocate and initialize a new local port bind bucket.
75189 * The bindhash mutex for snum's hash chain must be held here.
15a11c5b 75190@@ -530,6 +533,8 @@ ok:
ae4e228f 75191 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
58c5fc13
MT
75192 spin_unlock(&head->lock);
75193
75194+ gr_update_task_in_ip_table(current, inet_sk(sk));
75195+
75196 if (tw) {
75197 inet_twsk_deschedule(tw, death_row);
ae4e228f 75198 while (twrefcnt) {
fe2de317
MT
75199diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75200index 86f13c67..0bce60f 100644
75201--- a/net/ipv4/inetpeer.c
75202+++ b/net/ipv4/inetpeer.c
75203@@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
66a7e928 75204 unsigned int sequence;
6e9df6a3 75205 int invalidated, gccnt = 0;
66a7e928
MT
75206
75207+ pax_track_stack();
75208+
6e9df6a3 75209 /* Attempt a lockless lookup first.
66a7e928
MT
75210 * Because of a concurrent writer, we might not find an existing entry.
75211 */
6e9df6a3 75212@@ -436,8 +438,8 @@ relookup:
6892158b 75213 if (p) {
16454cff 75214 p->daddr = *daddr;
6892158b
MT
75215 atomic_set(&p->refcnt, 1);
75216- atomic_set(&p->rid, 0);
6e9df6a3 75217- atomic_set(&p->ip_id_count,
6892158b 75218+ atomic_set_unchecked(&p->rid, 0);
6e9df6a3
MT
75219+ atomic_set_unchecked(&p->ip_id_count,
75220 (daddr->family == AF_INET) ?
75221 secure_ip_id(daddr->addr.a4) :
75222 secure_ipv6_id(daddr->addr.a6));
fe2de317
MT
75223diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75224index 0e0ab98..2ed7dd5 100644
75225--- a/net/ipv4/ip_fragment.c
75226+++ b/net/ipv4/ip_fragment.c
75227@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
6892158b
MT
75228 return 0;
75229
75230 start = qp->rid;
75231- end = atomic_inc_return(&peer->rid);
75232+ end = atomic_inc_return_unchecked(&peer->rid);
75233 qp->rid = end;
75234
75235 rc = qp->q.fragments && (end - start) > max;
fe2de317
MT
75236diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75237index 8905e92..0b179fb 100644
75238--- a/net/ipv4/ip_sockglue.c
75239+++ b/net/ipv4/ip_sockglue.c
75240@@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
66a7e928
MT
75241 int val;
75242 int len;
75243
75244+ pax_track_stack();
75245+
75246 if (level != SOL_IP)
75247 return -EOPNOTSUPP;
75248
fe2de317 75249@@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
15a11c5b
MT
75250 len = min_t(unsigned int, len, opt->optlen);
75251 if (put_user(len, optlen))
75252 return -EFAULT;
75253- if (copy_to_user(optval, opt->__data, len))
75254+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75255+ copy_to_user(optval, opt->__data, len))
75256 return -EFAULT;
75257 return 0;
75258 }
fe2de317 75259@@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
6e9df6a3
MT
75260 if (sk->sk_type != SOCK_STREAM)
75261 return -ENOPROTOOPT;
75262
75263- msg.msg_control = optval;
75264+ msg.msg_control = (void __force_kernel *)optval;
75265 msg.msg_controllen = len;
75266 msg.msg_flags = flags;
75267
fe2de317
MT
75268diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75269index 472a8c4..6507cd4 100644
75270--- a/net/ipv4/ipconfig.c
75271+++ b/net/ipv4/ipconfig.c
75272@@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75273
75274 mm_segment_t oldfs = get_fs();
75275 set_fs(get_ds());
75276- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75277+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75278 set_fs(oldfs);
75279 return res;
75280 }
75281@@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75282
75283 mm_segment_t oldfs = get_fs();
75284 set_fs(get_ds());
75285- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75286+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75287 set_fs(oldfs);
75288 return res;
75289 }
75290@@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75291
75292 mm_segment_t oldfs = get_fs();
75293 set_fs(get_ds());
75294- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75295+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75296 set_fs(oldfs);
75297 return res;
75298 }
75299diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75300index 076b7c8..9c8d038 100644
75301--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
75302+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75303@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
58c5fc13
MT
75304
75305 *len = 0;
75306
75307- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
75308+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
75309 if (*octets == NULL) {
75310 if (net_ratelimit())
57199397 75311 pr_notice("OOM in bsalg (%d)\n", __LINE__);
fe2de317
MT
75312diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75313index 39b403f..8e6a0a8 100644
75314--- a/net/ipv4/ping.c
75315+++ b/net/ipv4/ping.c
75316@@ -837,7 +837,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
15a11c5b
MT
75317 sk_rmem_alloc_get(sp),
75318 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75319 atomic_read(&sp->sk_refcnt), sp,
75320- atomic_read(&sp->sk_drops), len);
75321+ atomic_read_unchecked(&sp->sk_drops), len);
75322 }
75323
75324 static int ping_seq_show(struct seq_file *seq, void *v)
fe2de317
MT
75325diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75326index 61714bd..c9cee6d 100644
75327--- a/net/ipv4/raw.c
75328+++ b/net/ipv4/raw.c
75329@@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
8308f9c9
MT
75330 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75331 {
75332 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75333- atomic_inc(&sk->sk_drops);
75334+ atomic_inc_unchecked(&sk->sk_drops);
75335 kfree_skb(skb);
75336 return NET_RX_DROP;
75337 }
6e9df6a3 75338@@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
71d190be
MT
75339
75340 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75341 {
75342+ struct icmp_filter filter;
75343+
75344 if (optlen > sizeof(struct icmp_filter))
75345 optlen = sizeof(struct icmp_filter);
75346- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75347+ if (copy_from_user(&filter, optval, optlen))
75348 return -EFAULT;
15a11c5b 75349+ raw_sk(sk)->filter = filter;
71d190be
MT
75350 return 0;
75351 }
75352
75353 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75354 {
71d190be 75355 int len, ret = -EFAULT;
15a11c5b 75356+ struct icmp_filter filter;
71d190be
MT
75357
75358 if (get_user(len, optlen))
15a11c5b 75359 goto out;
fe2de317 75360@@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
71d190be
MT
75361 if (len > sizeof(struct icmp_filter))
75362 len = sizeof(struct icmp_filter);
75363 ret = -EFAULT;
15a11c5b 75364- if (put_user(len, optlen) ||
71d190be 75365- copy_to_user(optval, &raw_sk(sk)->filter, len))
15a11c5b 75366+ filter = raw_sk(sk)->filter;
6e9df6a3 75367+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
71d190be
MT
75368 goto out;
75369 ret = 0;
75370 out: return ret;
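
raw_seticmpfilter and raw_geticmpfilter (like their rawv6 counterparts later in the patch) now go through a stack-local struct icmp_filter: the setter commits to raw_sk(sk)->filter only after copy_from_user succeeded in full, so a fault mid-copy cannot leave the socket with a partially updated filter, and the getter copies out of the local with len bounded by its size. A userspace analogue of the commit-on-success idea, with a deliberately failing copy helper standing in for a faulting copy_from_user:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct icmp_filter_like {
        unsigned int data[8];
    };

    static struct icmp_filter_like sock_filter;   /* stand-in for raw_sk(sk)->filter */

    /* Pretend source that fails halfway, like a faulting copy_from_user(). */
    static int copy_from_user_like(void *dst, const void *src, size_t len)
    {
        size_t half = len / 2;
        memcpy(dst, src, half);
        return (int)(len - half);     /* non-zero return: bytes NOT copied */
    }

    static int set_filter(const void *optval, size_t optlen)
    {
        struct icmp_filter_like tmp;

        if (optlen > sizeof(tmp))
            optlen = sizeof(tmp);
        if (copy_from_user_like(&tmp, optval, optlen))
            return -EFAULT;           /* sock_filter is untouched on failure */
        sock_filter = tmp;            /* commit only after a complete copy */
        return 0;
    }

    int main(void)
    {
        struct icmp_filter_like in = { .data = { 1, 2, 3, 4, 5, 6, 7, 8 } };

        printf("set_filter: %d\n", set_filter(&in, sizeof(in)));
        printf("sock_filter.data[0] after failed copy: %u\n", sock_filter.data[0]);
        return 0;
    }
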
fe2de317 75371@@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
8308f9c9
MT
75372 sk_wmem_alloc_get(sp),
75373 sk_rmem_alloc_get(sp),
75374 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75375- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66a7e928
MT
75376+ atomic_read(&sp->sk_refcnt),
75377+#ifdef CONFIG_GRKERNSEC_HIDESYM
75378+ NULL,
75379+#else
75380+ sp,
75381+#endif
75382+ atomic_read_unchecked(&sp->sk_drops));
8308f9c9
MT
75383 }
75384
75385 static int raw_seq_show(struct seq_file *seq, void *v)
fe2de317
MT
75386diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75387index 05ac666c..82384a7 100644
75388--- a/net/ipv4/route.c
75389+++ b/net/ipv4/route.c
75390@@ -309,7 +309,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
8308f9c9
MT
75391
75392 static inline int rt_genid(struct net *net)
75393 {
75394- return atomic_read(&net->ipv4.rt_genid);
75395+ return atomic_read_unchecked(&net->ipv4.rt_genid);
75396 }
75397
75398 #ifdef CONFIG_PROC_FS
fe2de317 75399@@ -842,7 +842,7 @@ static void rt_cache_invalidate(struct net *net)
8308f9c9
MT
75400 unsigned char shuffle;
75401
75402 get_random_bytes(&shuffle, sizeof(shuffle));
75403- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75404+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
fe2de317 75405 redirect_genid++;
8308f9c9
MT
75406 }
75407
fe2de317 75408@@ -2920,7 +2920,7 @@ static int rt_fill_info(struct net *net,
15a11c5b
MT
75409 error = rt->dst.error;
75410 if (peer) {
6892158b 75411 inet_peer_refcheck(rt->peer);
15a11c5b
MT
75412- id = atomic_read(&peer->ip_id_count) & 0xffff;
75413+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75414 if (peer->tcp_ts_stamp) {
75415 ts = peer->tcp_ts;
75416 tsage = get_seconds() - peer->tcp_ts_stamp;
fe2de317
MT
75417diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
75418index 46febca..98b73a4 100644
75419--- a/net/ipv4/tcp.c
75420+++ b/net/ipv4/tcp.c
75421@@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
66a7e928
MT
75422 int val;
75423 int err = 0;
75424
75425+ pax_track_stack();
75426+
75427 /* These are data/string values, all the others are ints */
75428 switch (optname) {
75429 case TCP_CONGESTION: {
fe2de317 75430@@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
66a7e928
MT
75431 struct tcp_sock *tp = tcp_sk(sk);
75432 int val, len;
75433
75434+ pax_track_stack();
75435+
75436 if (get_user(len, optlen))
75437 return -EFAULT;
75438
fe2de317
MT
75439diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75440index 7963e03..c44f5d0 100644
75441--- a/net/ipv4/tcp_ipv4.c
75442+++ b/net/ipv4/tcp_ipv4.c
15a11c5b 75443@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
ae4e228f 75444 int sysctl_tcp_low_latency __read_mostly;
6892158b 75445 EXPORT_SYMBOL(sysctl_tcp_low_latency);
58c5fc13 75446
58c5fc13 75447+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
ae4e228f 75448+extern int grsec_enable_blackhole;
58c5fc13 75449+#endif
ae4e228f
MT
75450
75451 #ifdef CONFIG_TCP_MD5SIG
75452 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
fe2de317 75453@@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
57199397
MT
75454 return 0;
75455
75456 reset:
75457+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75458+ if (!grsec_enable_blackhole)
75459+#endif
75460 tcp_v4_send_reset(rsk, skb);
75461 discard:
75462 kfree_skb(skb);
6e9df6a3 75463@@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
ae4e228f
MT
75464 TCP_SKB_CB(skb)->sacked = 0;
75465
75466 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75467- if (!sk)
75468+ if (!sk) {
75469+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75470+ ret = 1;
75471+#endif
75472 goto no_tcp_socket;
df50ba0c 75473-
ae4e228f 75474+ }
ae4e228f
MT
75475 process:
75476- if (sk->sk_state == TCP_TIME_WAIT)
75477+ if (sk->sk_state == TCP_TIME_WAIT) {
75478+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75479+ ret = 2;
75480+#endif
75481 goto do_time_wait;
75482+ }
75483
df50ba0c
MT
75484 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75485 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
6e9df6a3 75486@@ -1739,6 +1752,10 @@ no_tcp_socket:
58c5fc13
MT
75487 bad_packet:
75488 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75489 } else {
75490+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
ae4e228f
MT
75491+ if (!grsec_enable_blackhole || (ret == 1 &&
75492+ (skb->dev->flags & IFF_LOOPBACK)))
58c5fc13
MT
75493+#endif
75494 tcp_v4_send_reset(NULL, skb);
75495 }
75496
fe2de317 75497@@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
57199397
MT
75498 0, /* non standard timer */
75499 0, /* open_requests have no inode */
75500 atomic_read(&sk->sk_refcnt),
75501+#ifdef CONFIG_GRKERNSEC_HIDESYM
75502+ NULL,
75503+#else
75504 req,
75505+#endif
75506 len);
75507 }
75508
fe2de317 75509@@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
57199397
MT
75510 sock_i_uid(sk),
75511 icsk->icsk_probes_out,
75512 sock_i_ino(sk),
75513- atomic_read(&sk->sk_refcnt), sk,
75514+ atomic_read(&sk->sk_refcnt),
75515+#ifdef CONFIG_GRKERNSEC_HIDESYM
75516+ NULL,
75517+#else
75518+ sk,
75519+#endif
75520 jiffies_to_clock_t(icsk->icsk_rto),
75521 jiffies_to_clock_t(icsk->icsk_ack.ato),
75522 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
fe2de317 75523@@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
15a11c5b 75524 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
57199397
MT
75525 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75526 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75527- atomic_read(&tw->tw_refcnt), tw, len);
75528+ atomic_read(&tw->tw_refcnt),
75529+#ifdef CONFIG_GRKERNSEC_HIDESYM
75530+ NULL,
75531+#else
75532+ tw,
75533+#endif
75534+ len);
75535 }
75536
75537 #define TMPSZ 150
fe2de317
MT
75538diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75539index 0ce3d06..e182e59 100644
75540--- a/net/ipv4/tcp_minisocks.c
75541+++ b/net/ipv4/tcp_minisocks.c
df50ba0c 75542@@ -27,6 +27,10 @@
ae4e228f
MT
75543 #include <net/inet_common.h>
75544 #include <net/xfrm.h>
75545
75546+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75547+extern int grsec_enable_blackhole;
75548+#endif
75549+
75550 int sysctl_tcp_syncookies __read_mostly = 1;
75551 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75552
6e9df6a3 75553@@ -750,6 +754,10 @@ listen_overflow:
58c5fc13
MT
75554
75555 embryonic_reset:
75556 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75557+
df50ba0c
MT
75558+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75559+ if (!grsec_enable_blackhole)
58c5fc13 75560+#endif
df50ba0c
MT
75561 if (!(flg & TCP_FLAG_RST))
75562 req->rsk_ops->send_reset(sk, skb);
58c5fc13 75563
fe2de317
MT
75564diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
75565index 882e0b0..2eba47f 100644
75566--- a/net/ipv4/tcp_output.c
75567+++ b/net/ipv4/tcp_output.c
75568@@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
66a7e928
MT
75569 int mss;
75570 int s_data_desired = 0;
75571
75572+ pax_track_stack();
75573+
75574 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
75575 s_data_desired = cvp->s_data_desired;
75576 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
fe2de317
MT
75577diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75578index 85ee7eb..53277ab 100644
75579--- a/net/ipv4/tcp_probe.c
75580+++ b/net/ipv4/tcp_probe.c
75581@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
ae4e228f
MT
75582 if (cnt + width >= len)
75583 break;
75584
75585- if (copy_to_user(buf + cnt, tbuf, width))
bc901d79 75586+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
ae4e228f
MT
75587 return -EFAULT;
75588 cnt += width;
75589 }
fe2de317
MT
75590diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75591index ecd44b0..b32fba6 100644
75592--- a/net/ipv4/tcp_timer.c
75593+++ b/net/ipv4/tcp_timer.c
df50ba0c
MT
75594@@ -22,6 +22,10 @@
75595 #include <linux/gfp.h>
ae4e228f
MT
75596 #include <net/tcp.h>
75597
75598+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75599+extern int grsec_lastack_retries;
75600+#endif
75601+
75602 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75603 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75604 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
fe2de317 75605@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
ae4e228f
MT
75606 }
75607 }
75608
75609+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75610+ if ((sk->sk_state == TCP_LAST_ACK) &&
75611+ (grsec_lastack_retries > 0) &&
75612+ (grsec_lastack_retries < retry_until))
75613+ retry_until = grsec_lastack_retries;
75614+#endif
75615+
bc901d79
MT
75616 if (retransmits_timed_out(sk, retry_until,
75617 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
ae4e228f 75618 /* Has it gone just too far? */
fe2de317
MT
75619diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75620index 1b5a193..bd354b0 100644
75621--- a/net/ipv4/udp.c
75622+++ b/net/ipv4/udp.c
58c5fc13
MT
75623@@ -86,6 +86,7 @@
75624 #include <linux/types.h>
75625 #include <linux/fcntl.h>
75626 #include <linux/module.h>
75627+#include <linux/security.h>
75628 #include <linux/socket.h>
75629 #include <linux/sockios.h>
75630 #include <linux/igmp.h>
6e9df6a3
MT
75631@@ -108,6 +109,10 @@
75632 #include <trace/events/udp.h>
ae4e228f
MT
75633 #include "udp_impl.h"
75634
75635+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75636+extern int grsec_enable_blackhole;
75637+#endif
75638+
75639 struct udp_table udp_table __read_mostly;
75640 EXPORT_SYMBOL(udp_table);
75641
6e9df6a3 75642@@ -565,6 +570,9 @@ found:
58c5fc13
MT
75643 return s;
75644 }
75645
75646+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75647+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75648+
75649 /*
75650 * This routine is called by the ICMP module when it gets some
75651 * sort of error condition. If err < 0 then the socket should
fe2de317 75652@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
58c5fc13
MT
75653 dport = usin->sin_port;
75654 if (dport == 0)
75655 return -EINVAL;
75656+
75657+ err = gr_search_udp_sendmsg(sk, usin);
75658+ if (err)
75659+ return err;
75660 } else {
75661 if (sk->sk_state != TCP_ESTABLISHED)
75662 return -EDESTADDRREQ;
75663+
75664+ err = gr_search_udp_sendmsg(sk, NULL);
75665+ if (err)
75666+ return err;
75667+
ae4e228f
MT
75668 daddr = inet->inet_daddr;
75669 dport = inet->inet_dport;
58c5fc13 75670 /* Open fast path for connected socket.
fe2de317 75671@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
8308f9c9
MT
75672 udp_lib_checksum_complete(skb)) {
75673 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75674 IS_UDPLITE(sk));
75675- atomic_inc(&sk->sk_drops);
75676+ atomic_inc_unchecked(&sk->sk_drops);
75677 __skb_unlink(skb, rcvq);
75678 __skb_queue_tail(&list_kill, skb);
75679 }
6e9df6a3 75680@@ -1185,6 +1202,10 @@ try_again:
58c5fc13
MT
75681 if (!skb)
75682 goto out;
75683
75684+ err = gr_search_udp_recvmsg(sk, skb);
75685+ if (err)
75686+ goto out_free;
75687+
75688 ulen = skb->len - sizeof(struct udphdr);
df50ba0c
MT
75689 if (len > ulen)
75690 len = ulen;
fe2de317 75691@@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
75692
75693 drop:
75694 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75695- atomic_inc(&sk->sk_drops);
75696+ atomic_inc_unchecked(&sk->sk_drops);
75697 kfree_skb(skb);
75698 return -1;
75699 }
fe2de317 75700@@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
8308f9c9
MT
75701 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75702
75703 if (!skb1) {
75704- atomic_inc(&sk->sk_drops);
75705+ atomic_inc_unchecked(&sk->sk_drops);
75706 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75707 IS_UDPLITE(sk));
75708 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
fe2de317 75709@@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
58c5fc13
MT
75710 goto csum_error;
75711
75712 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75713+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
ae4e228f 75714+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
58c5fc13
MT
75715+#endif
75716 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75717
75718 /*
fe2de317 75719@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
57199397
MT
75720 sk_wmem_alloc_get(sp),
75721 sk_rmem_alloc_get(sp),
75722 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75723- atomic_read(&sp->sk_refcnt), sp,
8308f9c9 75724- atomic_read(&sp->sk_drops), len);
57199397
MT
75725+ atomic_read(&sp->sk_refcnt),
75726+#ifdef CONFIG_GRKERNSEC_HIDESYM
75727+ NULL,
75728+#else
75729+ sp,
75730+#endif
8308f9c9 75731+ atomic_read_unchecked(&sp->sk_drops), len);
57199397
MT
75732 }
75733
8308f9c9 75734 int udp4_seq_show(struct seq_file *seq, void *v)
fe2de317
MT
75735diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75736index 12368c5..fbf899f 100644
75737--- a/net/ipv6/addrconf.c
75738+++ b/net/ipv6/addrconf.c
75739@@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
6e9df6a3
MT
75740 p.iph.ihl = 5;
75741 p.iph.protocol = IPPROTO_IPV6;
75742 p.iph.ttl = 64;
75743- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75744+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75745
75746 if (ops->ndo_do_ioctl) {
75747 mm_segment_t oldfs = get_fs();
fe2de317
MT
75748diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75749index 8a58e8c..8b5e631 100644
75750--- a/net/ipv6/inet6_connection_sock.c
75751+++ b/net/ipv6/inet6_connection_sock.c
75752@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
8308f9c9
MT
75753 #ifdef CONFIG_XFRM
75754 {
75755 struct rt6_info *rt = (struct rt6_info *)dst;
75756- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75757+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75758 }
75759 #endif
75760 }
fe2de317 75761@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
8308f9c9
MT
75762 #ifdef CONFIG_XFRM
75763 if (dst) {
75764 struct rt6_info *rt = (struct rt6_info *)dst;
75765- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75766+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75767 __sk_dst_reset(sk);
75768 dst = NULL;
75769 }
fe2de317
MT
75770diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75771index 2fbda5f..26ed683 100644
75772--- a/net/ipv6/ipv6_sockglue.c
75773+++ b/net/ipv6/ipv6_sockglue.c
75774@@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
66a7e928
MT
75775 int val, valbool;
75776 int retv = -ENOPROTOOPT;
75777
75778+ pax_track_stack();
75779+
75780 if (optval == NULL)
75781 val=0;
75782 else {
fe2de317 75783@@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
66a7e928
MT
75784 int len;
75785 int val;
75786
75787+ pax_track_stack();
75788+
75789 if (ip6_mroute_opt(optname))
75790 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
75791
fe2de317 75792@@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
6e9df6a3
MT
75793 if (sk->sk_type != SOCK_STREAM)
75794 return -ENOPROTOOPT;
75795
75796- msg.msg_control = optval;
75797+ msg.msg_control = (void __force_kernel *)optval;
75798 msg.msg_controllen = len;
75799 msg.msg_flags = flags;
75800
fe2de317
MT
75801diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75802index 343852e..c92bd15 100644
75803--- a/net/ipv6/raw.c
75804+++ b/net/ipv6/raw.c
75805@@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
8308f9c9
MT
75806 {
75807 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
75808 skb_checksum_complete(skb)) {
75809- atomic_inc(&sk->sk_drops);
75810+ atomic_inc_unchecked(&sk->sk_drops);
75811 kfree_skb(skb);
75812 return NET_RX_DROP;
75813 }
fe2de317 75814@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
75815 struct raw6_sock *rp = raw6_sk(sk);
75816
75817 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75818- atomic_inc(&sk->sk_drops);
75819+ atomic_inc_unchecked(&sk->sk_drops);
75820 kfree_skb(skb);
75821 return NET_RX_DROP;
75822 }
fe2de317 75823@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
75824
75825 if (inet->hdrincl) {
75826 if (skb_checksum_complete(skb)) {
75827- atomic_inc(&sk->sk_drops);
75828+ atomic_inc_unchecked(&sk->sk_drops);
75829 kfree_skb(skb);
75830 return NET_RX_DROP;
75831 }
66a7e928 75832@@ -601,7 +601,7 @@ out:
58c5fc13
MT
75833 return err;
75834 }
75835
75836-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75837+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66a7e928 75838 struct flowi6 *fl6, struct dst_entry **dstp,
58c5fc13
MT
75839 unsigned int flags)
75840 {
fe2de317 75841@@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
66a7e928
MT
75842 u16 proto;
75843 int err;
75844
75845+ pax_track_stack();
75846+
75847 /* Rough check on arithmetic overflow,
75848 better check is made in ip6_append_data().
75849 */
75850@@ -909,12 +911,15 @@ do_confirm:
71d190be
MT
75851 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75852 char __user *optval, int optlen)
75853 {
75854+ struct icmp6_filter filter;
75855+
75856 switch (optname) {
75857 case ICMPV6_FILTER:
75858 if (optlen > sizeof(struct icmp6_filter))
75859 optlen = sizeof(struct icmp6_filter);
75860- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75861+ if (copy_from_user(&filter, optval, optlen))
75862 return -EFAULT;
15a11c5b 75863+ raw6_sk(sk)->filter = filter;
71d190be
MT
75864 return 0;
75865 default:
75866 return -ENOPROTOOPT;
fe2de317 75867@@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
71d190be
MT
75868 char __user *optval, int __user *optlen)
75869 {
71d190be 75870 int len;
15a11c5b 75871+ struct icmp6_filter filter;
71d190be
MT
75872
75873 switch (optname) {
15a11c5b 75874 case ICMPV6_FILTER:
fe2de317 75875@@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
71d190be
MT
75876 len = sizeof(struct icmp6_filter);
75877 if (put_user(len, optlen))
75878 return -EFAULT;
75879- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
15a11c5b
MT
75880+ filter = raw6_sk(sk)->filter;
75881+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
71d190be
MT
75882 return -EFAULT;
75883 return 0;
75884 default:
fe2de317 75885@@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
6892158b
MT
75886 0, 0L, 0,
75887 sock_i_uid(sp), 0,
75888 sock_i_ino(sp),
75889- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75890+ atomic_read(&sp->sk_refcnt),
75891+#ifdef CONFIG_GRKERNSEC_HIDESYM
75892+ NULL,
75893+#else
75894+ sp,
75895+#endif
8308f9c9 75896+ atomic_read_unchecked(&sp->sk_drops));
6892158b
MT
75897 }
75898
75899 static int raw6_seq_show(struct seq_file *seq, void *v)
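The rawv6_seticmpfilter()/rawv6_geticmpfilter() hunks above bounce the ICMPv6 filter through a stack-local struct instead of copying straight into or out of the socket, and refuse oversized lengths before copying back to user space. A condensed sketch of the resulting pattern (illustrative, not the literal patched functions):

	struct icmp6_filter tmp;

	/* setsockopt path: clamp, copy into the local, then publish the
	 * whole filter with a single struct assignment */
	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	if (copy_from_user(&tmp, optval, optlen))
		return -EFAULT;
	raw6_sk(sk)->filter = tmp;

	/* getsockopt path: snapshot the field, reject lengths larger than
	 * the snapshot, only then copy out */
	tmp = raw6_sk(sk)->filter;
	if (len > sizeof(tmp) || copy_to_user(optval, &tmp, len))
		return -EFAULT;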
fe2de317
MT
75900diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75901index 7b8fc57..c6185da 100644
75902--- a/net/ipv6/tcp_ipv6.c
75903+++ b/net/ipv6/tcp_ipv6.c
75904@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
df50ba0c
MT
75905 }
75906 #endif
75907
75908+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75909+extern int grsec_enable_blackhole;
75910+#endif
75911+
75912 static void tcp_v6_hash(struct sock *sk)
75913 {
75914 if (sk->sk_state != TCP_CLOSE) {
fe2de317 75915@@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
57199397
MT
75916 return 0;
75917
75918 reset:
75919+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75920+ if (!grsec_enable_blackhole)
75921+#endif
75922 tcp_v6_send_reset(sk, skb);
75923 discard:
75924 if (opt_skb)
fe2de317 75925@@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
df50ba0c 75926 TCP_SKB_CB(skb)->sacked = 0;
58c5fc13 75927
df50ba0c
MT
75928 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75929- if (!sk)
75930+ if (!sk) {
75931+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75932+ ret = 1;
75933+#endif
75934 goto no_tcp_socket;
75935+ }
75936
75937 process:
75938- if (sk->sk_state == TCP_TIME_WAIT)
75939+ if (sk->sk_state == TCP_TIME_WAIT) {
58c5fc13 75940+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
df50ba0c 75941+ ret = 2;
58c5fc13 75942+#endif
df50ba0c
MT
75943 goto do_time_wait;
75944+ }
75945
57199397
MT
75946 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75947 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
6e9df6a3 75948@@ -1779,6 +1794,10 @@ no_tcp_socket:
58c5fc13
MT
75949 bad_packet:
75950 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75951 } else {
75952+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
df50ba0c
MT
75953+ if (!grsec_enable_blackhole || (ret == 1 &&
75954+ (skb->dev->flags & IFF_LOOPBACK)))
58c5fc13
MT
75955+#endif
75956 tcp_v6_send_reset(NULL, skb);
75957 }
75958
fe2de317 75959@@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file *seq,
6892158b
MT
75960 uid,
75961 0, /* non standard timer */
75962 0, /* open_requests have no inode */
75963- 0, req);
75964+ 0,
75965+#ifdef CONFIG_GRKERNSEC_HIDESYM
75966+ NULL
75967+#else
75968+ req
75969+#endif
75970+ );
75971 }
75972
75973 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
fe2de317 75974@@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
6892158b
MT
75975 sock_i_uid(sp),
75976 icsk->icsk_probes_out,
75977 sock_i_ino(sp),
75978- atomic_read(&sp->sk_refcnt), sp,
75979+ atomic_read(&sp->sk_refcnt),
75980+#ifdef CONFIG_GRKERNSEC_HIDESYM
75981+ NULL,
75982+#else
75983+ sp,
75984+#endif
75985 jiffies_to_clock_t(icsk->icsk_rto),
75986 jiffies_to_clock_t(icsk->icsk_ack.ato),
75987 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
fe2de317 75988@@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct seq_file *seq,
6892158b
MT
75989 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75990 tw->tw_substate, 0, 0,
75991 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75992- atomic_read(&tw->tw_refcnt), tw);
75993+ atomic_read(&tw->tw_refcnt),
75994+#ifdef CONFIG_GRKERNSEC_HIDESYM
75995+ NULL
75996+#else
75997+ tw
75998+#endif
75999+ );
76000 }
76001
76002 static int tcp6_seq_show(struct seq_file *seq, void *v)
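The tcp_v6_rcv()/tcp_v6_do_rcv() hunks above gate every outgoing reset on the grsec_enable_blackhole sysctl, using ret to remember why the packet fell through (1: no listening socket, 2: TIME_WAIT) so that loopback probes can still be answered. The same condition, written here as a hypothetical helper purely for clarity — the patch open-codes it at each site:

#ifdef CONFIG_GRKERNSEC_BLACKHOLE
extern int grsec_enable_blackhole;
#endif

static bool blackhole_allows_reply(const struct sk_buff *skb, int ret)
{
#ifdef CONFIG_GRKERNSEC_BLACKHOLE
	/* stay silent unless blackholing is off, or the lookup failure
	 * (ret == 1) happened on the loopback device */
	return !grsec_enable_blackhole ||
	       (ret == 1 && (skb->dev->flags & IFF_LOOPBACK));
#else
	return true;
#endif
}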
fe2de317
MT
76003diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
76004index bb95e8e..ae0ee80 100644
76005--- a/net/ipv6/udp.c
76006+++ b/net/ipv6/udp.c
df50ba0c
MT
76007@@ -50,6 +50,10 @@
76008 #include <linux/seq_file.h>
76009 #include "udp_impl.h"
76010
76011+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76012+extern int grsec_enable_blackhole;
76013+#endif
76014+
76015 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76016 {
76017 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
fe2de317 76018@@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
8308f9c9
MT
76019
76020 return 0;
76021 drop:
76022- atomic_inc(&sk->sk_drops);
76023+ atomic_inc_unchecked(&sk->sk_drops);
76024 drop_no_sk_drops_inc:
76025 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76026 kfree_skb(skb);
fe2de317 76027@@ -624,7 +628,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
8308f9c9
MT
76028 continue;
76029 }
76030 drop:
76031- atomic_inc(&sk->sk_drops);
76032+ atomic_inc_unchecked(&sk->sk_drops);
76033 UDP6_INC_STATS_BH(sock_net(sk),
76034 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76035 UDP6_INC_STATS_BH(sock_net(sk),
fe2de317 76036@@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
58c5fc13
MT
76037 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76038 proto == IPPROTO_UDPLITE);
76039
76040+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
df50ba0c 76041+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
58c5fc13 76042+#endif
df50ba0c 76043 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
58c5fc13
MT
76044
76045 kfree_skb(skb);
fe2de317 76046@@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
8308f9c9
MT
76047 if (!sock_owned_by_user(sk))
76048 udpv6_queue_rcv_skb(sk, skb);
76049 else if (sk_add_backlog(sk, skb)) {
76050- atomic_inc(&sk->sk_drops);
76051+ atomic_inc_unchecked(&sk->sk_drops);
76052 bh_unlock_sock(sk);
76053 sock_put(sk);
76054 goto discard;
fe2de317 76055@@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
6892158b
MT
76056 0, 0L, 0,
76057 sock_i_uid(sp), 0,
76058 sock_i_ino(sp),
76059- atomic_read(&sp->sk_refcnt), sp,
8308f9c9 76060- atomic_read(&sp->sk_drops));
6892158b
MT
76061+ atomic_read(&sp->sk_refcnt),
76062+#ifdef CONFIG_GRKERNSEC_HIDESYM
76063+ NULL,
76064+#else
76065+ sp,
76066+#endif
8308f9c9 76067+ atomic_read_unchecked(&sp->sk_drops));
6892158b
MT
76068 }
76069
8308f9c9 76070 int udp6_seq_show(struct seq_file *seq, void *v)
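The seq_show hunks in raw.c, tcp_ipv6.c and udp.c above all apply the same CONFIG_GRKERNSEC_HIDESYM shape: print NULL where the struct sock, request_sock or timewait pointer used to be emitted, so the /proc/net listings stop leaking kernel addresses. A hypothetical wrapper makes the intent explicit; the patch simply repeats the #ifdef inline:

#ifdef CONFIG_GRKERNSEC_HIDESYM
# define hidesym_ptr(p)	((void *)NULL)	/* never expose kernel pointers */
#else
# define hidesym_ptr(p)	(p)
#endif

	/* e.g. in a seq_show routine (illustrative): */
	seq_printf(seq, "%p", hidesym_ptr(sp));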
fe2de317
MT
76071diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76072index b3cc8b3..baa02d0 100644
76073--- a/net/irda/ircomm/ircomm_tty.c
76074+++ b/net/irda/ircomm/ircomm_tty.c
76075@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
58c5fc13
MT
76076 add_wait_queue(&self->open_wait, &wait);
76077
76078 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76079- __FILE__,__LINE__, tty->driver->name, self->open_count );
c52201e0 76080+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
58c5fc13
MT
76081
76082 /* As far as I can see, we protect open_count - Jean II */
76083 spin_lock_irqsave(&self->spinlock, flags);
76084 if (!tty_hung_up_p(filp)) {
76085 extra_count = 1;
76086- self->open_count--;
c52201e0 76087+ local_dec(&self->open_count);
58c5fc13
MT
76088 }
76089 spin_unlock_irqrestore(&self->spinlock, flags);
76090- self->blocked_open++;
c52201e0 76091+ local_inc(&self->blocked_open);
58c5fc13
MT
76092
76093 while (1) {
76094 if (tty->termios->c_cflag & CBAUD) {
fe2de317 76095@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
58c5fc13
MT
76096 }
76097
76098 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76099- __FILE__,__LINE__, tty->driver->name, self->open_count );
c52201e0 76100+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
58c5fc13
MT
76101
76102 schedule();
76103 }
fe2de317 76104@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
58c5fc13
MT
76105 if (extra_count) {
76106 /* ++ is not atomic, so this should be protected - Jean II */
76107 spin_lock_irqsave(&self->spinlock, flags);
76108- self->open_count++;
c52201e0 76109+ local_inc(&self->open_count);
58c5fc13
MT
76110 spin_unlock_irqrestore(&self->spinlock, flags);
76111 }
76112- self->blocked_open--;
c52201e0 76113+ local_dec(&self->blocked_open);
58c5fc13
MT
76114
76115 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76116- __FILE__,__LINE__, tty->driver->name, self->open_count);
c52201e0 76117+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
58c5fc13
MT
76118
76119 if (!retval)
76120 self->flags |= ASYNC_NORMAL_ACTIVE;
fe2de317 76121@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
76122 }
76123 /* ++ is not atomic, so this should be protected - Jean II */
76124 spin_lock_irqsave(&self->spinlock, flags);
76125- self->open_count++;
c52201e0 76126+ local_inc(&self->open_count);
58c5fc13
MT
76127
76128 tty->driver_data = self;
76129 self->tty = tty;
76130 spin_unlock_irqrestore(&self->spinlock, flags);
76131
76132 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76133- self->line, self->open_count);
c52201e0 76134+ self->line, local_read(&self->open_count));
58c5fc13
MT
76135
76136 /* Not really used by us, but lets do it anyway */
76137 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
fe2de317 76138@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
76139 return;
76140 }
76141
76142- if ((tty->count == 1) && (self->open_count != 1)) {
c52201e0 76143+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
58c5fc13
MT
76144 /*
76145 * Uh, oh. tty->count is 1, which means that the tty
76146 * structure will be freed. state->count should always
fe2de317 76147@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
76148 */
76149 IRDA_DEBUG(0, "%s(), bad serial port count; "
76150 "tty->count is 1, state->count is %d\n", __func__ ,
76151- self->open_count);
76152- self->open_count = 1;
c52201e0
MT
76153+ local_read(&self->open_count));
76154+ local_set(&self->open_count, 1);
58c5fc13
MT
76155 }
76156
76157- if (--self->open_count < 0) {
c52201e0 76158+ if (local_dec_return(&self->open_count) < 0) {
58c5fc13
MT
76159 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76160- __func__, self->line, self->open_count);
76161- self->open_count = 0;
c52201e0
MT
76162+ __func__, self->line, local_read(&self->open_count));
76163+ local_set(&self->open_count, 0);
58c5fc13
MT
76164 }
76165- if (self->open_count) {
c52201e0 76166+ if (local_read(&self->open_count)) {
58c5fc13
MT
76167 spin_unlock_irqrestore(&self->spinlock, flags);
76168
76169 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
fe2de317 76170@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
76171 tty->closing = 0;
76172 self->tty = NULL;
76173
76174- if (self->blocked_open) {
c52201e0 76175+ if (local_read(&self->blocked_open)) {
58c5fc13
MT
76176 if (self->close_delay)
76177 schedule_timeout_interruptible(self->close_delay);
76178 wake_up_interruptible(&self->open_wait);
fe2de317 76179@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
58c5fc13
MT
76180 spin_lock_irqsave(&self->spinlock, flags);
76181 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76182 self->tty = NULL;
76183- self->open_count = 0;
c52201e0 76184+ local_set(&self->open_count, 0);
58c5fc13
MT
76185 spin_unlock_irqrestore(&self->spinlock, flags);
76186
76187 wake_up_interruptible(&self->open_wait);
fe2de317 76188@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
58c5fc13
MT
76189 seq_putc(m, '\n');
76190
76191 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76192- seq_printf(m, "Open count: %d\n", self->open_count);
c52201e0 76193+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
58c5fc13
MT
76194 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76195 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76196
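The ircomm_tty hunks above (and the mac80211 hunks that follow) convert the open_count/blocked_open fields from plain int to local_t from <asm/local.h>, so each increment and decrement becomes a single atomic operation instead of a racy read-modify-write on a bare int. A short usage sketch of the accessors the hunks rely on:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

	local_inc(&open_count);			/* was: open_count++ */
	local_dec(&open_count);			/* was: open_count-- */

	if (local_dec_return(&open_count) < 0)	/* was: if (--open_count < 0) */
		local_set(&open_count, 0);	/* was: open_count = 0 */

	pr_info("count = %ld\n", (long)local_read(&open_count));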
fe2de317
MT
76197diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76198index e2013e4..edfc1e3 100644
76199--- a/net/iucv/af_iucv.c
76200+++ b/net/iucv/af_iucv.c
76201@@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct sock *sk)
8308f9c9
MT
76202
76203 write_lock_bh(&iucv_sk_list.lock);
76204
76205- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76206+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76207 while (__iucv_get_sock_by_name(name)) {
76208 sprintf(name, "%08x",
76209- atomic_inc_return(&iucv_sk_list.autobind_name));
76210+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76211 }
76212
76213 write_unlock_bh(&iucv_sk_list.lock);
fe2de317
MT
76214diff --git a/net/key/af_key.c b/net/key/af_key.c
76215index 1e733e9..c84de2f 100644
76216--- a/net/key/af_key.c
76217+++ b/net/key/af_key.c
76218@@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
66a7e928
MT
76219 struct xfrm_migrate m[XFRM_MAX_DEPTH];
76220 struct xfrm_kmaddress k;
76221
76222+ pax_track_stack();
76223+
76224 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
76225 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
76226 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
fe2de317 76227@@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
8308f9c9
MT
76228 static u32 get_acqseq(void)
76229 {
76230 u32 res;
76231- static atomic_t acqseq;
76232+ static atomic_unchecked_t acqseq;
76233
76234 do {
76235- res = atomic_inc_return(&acqseq);
76236+ res = atomic_inc_return_unchecked(&acqseq);
76237 } while (!res);
76238 return res;
76239 }
fe2de317
MT
76240diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
76241index 956b7e4..f01d328 100644
76242--- a/net/lapb/lapb_iface.c
76243+++ b/net/lapb/lapb_iface.c
76244@@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
15a11c5b 76245 goto out;
66a7e928 76246
15a11c5b
MT
76247 lapb->dev = dev;
76248- lapb->callbacks = *callbacks;
76249+ lapb->callbacks = callbacks;
66a7e928 76250
15a11c5b
MT
76251 __lapb_insert_cb(lapb);
76252
fe2de317 76253@@ -380,32 +380,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
15a11c5b
MT
76254
76255 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
76256 {
76257- if (lapb->callbacks.connect_confirmation)
76258- lapb->callbacks.connect_confirmation(lapb->dev, reason);
76259+ if (lapb->callbacks->connect_confirmation)
76260+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
66a7e928 76261 }
15a11c5b
MT
76262
76263 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
76264 {
76265- if (lapb->callbacks.connect_indication)
76266- lapb->callbacks.connect_indication(lapb->dev, reason);
76267+ if (lapb->callbacks->connect_indication)
76268+ lapb->callbacks->connect_indication(lapb->dev, reason);
76269 }
76270
76271 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
76272 {
76273- if (lapb->callbacks.disconnect_confirmation)
76274- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
76275+ if (lapb->callbacks->disconnect_confirmation)
76276+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
76277 }
76278
76279 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
76280 {
76281- if (lapb->callbacks.disconnect_indication)
76282- lapb->callbacks.disconnect_indication(lapb->dev, reason);
76283+ if (lapb->callbacks->disconnect_indication)
76284+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
76285 }
76286
76287 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
76288 {
76289- if (lapb->callbacks.data_indication)
76290- return lapb->callbacks.data_indication(lapb->dev, skb);
76291+ if (lapb->callbacks->data_indication)
76292+ return lapb->callbacks->data_indication(lapb->dev, skb);
76293
76294 kfree_skb(skb);
76295 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
fe2de317 76296@@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
15a11c5b
MT
76297 {
76298 int used = 0;
76299
76300- if (lapb->callbacks.data_transmit) {
76301- lapb->callbacks.data_transmit(lapb->dev, skb);
76302+ if (lapb->callbacks->data_transmit) {
76303+ lapb->callbacks->data_transmit(lapb->dev, skb);
76304 used = 1;
76305 }
76306
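The lapb_iface.c hunks above stop copying the caller's lapb_register_struct into the control block and keep a pointer to it instead, so every callback site now dereferences lapb->callbacks->… . The practical consequence for callers is that the structure must outlive the registration. A hypothetical driver-side sketch (the my_* handlers are placeholders, not code from this patch):

static struct lapb_register_struct my_lapb_callbacks = {
	.connect_confirmation    = my_connect_confirmation,
	.connect_indication      = my_connect_indication,
	.disconnect_confirmation = my_disconnect_confirmation,
	.disconnect_indication   = my_disconnect_indication,
	.data_indication         = my_data_indication,
	.data_transmit           = my_data_transmit,
};

static int my_driver_attach(struct net_device *dev)
{
	/* the table has static storage duration, so the pointer stored by
	 * lapb_register() stays valid for the lifetime of the device */
	return lapb_register(dev, &my_lapb_callbacks);
}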
fe2de317
MT
76307diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
76308index a01d213..6a1f1ab 100644
76309--- a/net/mac80211/debugfs_sta.c
76310+++ b/net/mac80211/debugfs_sta.c
76311@@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
66a7e928
MT
76312 struct tid_ampdu_rx *tid_rx;
76313 struct tid_ampdu_tx *tid_tx;
76314
76315+ pax_track_stack();
76316+
76317 rcu_read_lock();
76318
76319 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
fe2de317 76320@@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
66a7e928
MT
76321 struct sta_info *sta = file->private_data;
76322 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
76323
76324+ pax_track_stack();
76325+
76326 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
76327 htc->ht_supported ? "" : "not ");
76328 if (htc->ht_supported) {
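Several hunks in this section (ipv6_sockglue.c, raw.c, af_key.c, and the mac80211 debugfs/mlme files here) add a pax_track_stack() call at the top of functions with unusually large stack frames. Roughly — and only as an assumption-laden sketch of the idea, not the real implementation or field layout — the helper records how deep the kernel stack has grown so the used region can be sanitized on the way back to user space:

void pax_track_stack(void)
{
	unsigned long sp = (unsigned long)&sp;	/* approximate current stack pointer */

	/* remember the lowest stack address touched during this syscall so
	 * the stack-clearing code knows how much to wipe on return
	 * (lowest_stack is assumed to be a field the patch adds) */
	if (sp < current_thread_info()->lowest_stack)
		current_thread_info()->lowest_stack = sp;
}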
fe2de317
MT
76329diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76330index 9fab144..7f0fc14 100644
76331--- a/net/mac80211/ieee80211_i.h
76332+++ b/net/mac80211/ieee80211_i.h
16454cff 76333@@ -27,6 +27,7 @@
c52201e0
MT
76334 #include <net/ieee80211_radiotap.h>
76335 #include <net/cfg80211.h>
76336 #include <net/mac80211.h>
76337+#include <asm/local.h>
76338 #include "key.h"
76339 #include "sta_info.h"
76340
6e9df6a3 76341@@ -754,7 +755,7 @@ struct ieee80211_local {
ae4e228f 76342 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
58c5fc13
MT
76343 spinlock_t queue_stop_reason_lock;
76344
58c5fc13 76345- int open_count;
c52201e0 76346+ local_t open_count;
58c5fc13
MT
76347 int monitors, cooked_mntrs;
76348 /* number of interfaces with corresponding FIF_ flags */
bc901d79 76349 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
fe2de317
MT
76350diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76351index 556e7e6..120dcaf 100644
76352--- a/net/mac80211/iface.c
76353+++ b/net/mac80211/iface.c
76354@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
58c5fc13
MT
76355 break;
76356 }
76357
76358- if (local->open_count == 0) {
c52201e0 76359+ if (local_read(&local->open_count) == 0) {
58c5fc13
MT
76360 res = drv_start(local);
76361 if (res)
76362 goto err_del_bss;
fe2de317 76363@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
bc901d79
MT
76364 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76365
76366 if (!is_valid_ether_addr(dev->dev_addr)) {
76367- if (!local->open_count)
c52201e0 76368+ if (!local_read(&local->open_count))
bc901d79
MT
76369 drv_stop(local);
76370 return -EADDRNOTAVAIL;
76371 }
fe2de317 76372@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
bc901d79 76373 mutex_unlock(&local->mtx);
58c5fc13 76374
bc901d79
MT
76375 if (coming_up)
76376- local->open_count++;
c52201e0 76377+ local_inc(&local->open_count);
58c5fc13 76378
58c5fc13
MT
76379 if (hw_reconf_flags) {
76380 ieee80211_hw_config(local, hw_reconf_flags);
fe2de317 76381@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
58c5fc13 76382 err_del_interface:
df50ba0c 76383 drv_remove_interface(local, &sdata->vif);
58c5fc13
MT
76384 err_stop:
76385- if (!local->open_count)
c52201e0 76386+ if (!local_read(&local->open_count))
58c5fc13
MT
76387 drv_stop(local);
76388 err_del_bss:
76389 sdata->bss = NULL;
fe2de317 76390@@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
58c5fc13
MT
76391 }
76392
bc901d79
MT
76393 if (going_down)
76394- local->open_count--;
c52201e0 76395+ local_dec(&local->open_count);
58c5fc13
MT
76396
76397 switch (sdata->vif.type) {
76398 case NL80211_IFTYPE_AP_VLAN:
fe2de317 76399@@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
58c5fc13
MT
76400
76401 ieee80211_recalc_ps(local, -1);
76402
76403- if (local->open_count == 0) {
c52201e0 76404+ if (local_read(&local->open_count) == 0) {
bc901d79
MT
76405 if (local->ops->napi_poll)
76406 napi_disable(&local->napi);
ae4e228f 76407 ieee80211_clear_tx_pending(local);
fe2de317
MT
76408diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76409index 3d90dad..36884d5 100644
76410--- a/net/mac80211/main.c
76411+++ b/net/mac80211/main.c
76412@@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
58c5fc13
MT
76413 local->hw.conf.power_level = power;
76414 }
76415
76416- if (changed && local->open_count) {
c52201e0 76417+ if (changed && local_read(&local->open_count)) {
58c5fc13
MT
76418 ret = drv_config(local, changed);
76419 /*
76420 * Goal:
fe2de317
MT
76421diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
76422index 0f48368..d48e688 100644
76423--- a/net/mac80211/mlme.c
76424+++ b/net/mac80211/mlme.c
76425@@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
66a7e928
MT
76426 bool have_higher_than_11mbit = false;
76427 u16 ap_ht_cap_flags;
76428
76429+ pax_track_stack();
76430+
76431 /* AssocResp and ReassocResp have identical structure */
76432
76433 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
fe2de317
MT
76434diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76435index 6326d34..7225f61 100644
76436--- a/net/mac80211/pm.c
76437+++ b/net/mac80211/pm.c
76438@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
6e9df6a3
MT
76439 struct ieee80211_sub_if_data *sdata;
76440 struct sta_info *sta;
76441
76442- if (!local->open_count)
76443+ if (!local_read(&local->open_count))
76444 goto suspend;
76445
76446 ieee80211_scan_cancel(local);
fe2de317 76447@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
15a11c5b
MT
76448 cancel_work_sync(&local->dynamic_ps_enable_work);
76449 del_timer_sync(&local->dynamic_ps_timer);
76450
76451- local->wowlan = wowlan && local->open_count;
76452+ local->wowlan = wowlan && local_read(&local->open_count);
76453 if (local->wowlan) {
76454 int err = drv_suspend(local, wowlan);
6e9df6a3 76455 if (err < 0) {
fe2de317 76456@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
58c5fc13
MT
76457 }
76458
76459 /* stop hardware - this must stop RX */
ae4e228f 76460- if (local->open_count)
c52201e0 76461+ if (local_read(&local->open_count))
ae4e228f
MT
76462 ieee80211_stop_device(local);
76463
15a11c5b 76464 suspend:
fe2de317
MT
76465diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76466index 3d5a2cb..b17ad48 100644
76467--- a/net/mac80211/rate.c
76468+++ b/net/mac80211/rate.c
76469@@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
58c5fc13
MT
76470
76471 ASSERT_RTNL();
ae4e228f
MT
76472
76473- if (local->open_count)
c52201e0 76474+ if (local_read(&local->open_count))
58c5fc13
MT
76475 return -EBUSY;
76476
ae4e228f 76477 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
fe2de317
MT
76478diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76479index 4851e9e..d860e05 100644
76480--- a/net/mac80211/rc80211_pid_debugfs.c
76481+++ b/net/mac80211/rc80211_pid_debugfs.c
76482@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
58c5fc13 76483
ae4e228f 76484 spin_unlock_irqrestore(&events->lock, status);
58c5fc13 76485
ae4e228f
MT
76486- if (copy_to_user(buf, pb, p))
76487+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76488 return -EFAULT;
58c5fc13 76489
ae4e228f 76490 return p;
fe2de317
MT
76491diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76492index fd031e8..84fbfcf 100644
76493--- a/net/mac80211/util.c
76494+++ b/net/mac80211/util.c
76495@@ -1170,7 +1170,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
6e9df6a3 76496 drv_set_coverage_class(local, hw->wiphy->coverage_class);
58c5fc13 76497
6e9df6a3
MT
76498 /* everything else happens only if HW was up & running */
76499- if (!local->open_count)
76500+ if (!local_read(&local->open_count))
76501 goto wake_up;
76502
76503 /*
fe2de317
MT
76504diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76505index 32bff6d..d0cf986 100644
76506--- a/net/netfilter/Kconfig
76507+++ b/net/netfilter/Kconfig
76508@@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
76509
76510 To compile it as a module, choose M here. If unsure, say N.
76511
76512+config NETFILTER_XT_MATCH_GRADM
76513+ tristate '"gradm" match support'
76514+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76515+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76516+ ---help---
76517+	  The gradm match allows matching on grsecurity RBAC being enabled.
76518+ It is useful when iptables rules are applied early on bootup to
76519+ prevent connections to the machine (except from a trusted host)
76520+ while the RBAC system is disabled.
76521+
76522 config NETFILTER_XT_MATCH_HASHLIMIT
76523 tristate '"hashlimit" match support'
76524 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76525diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76526index 1a02853..5d8c22e 100644
76527--- a/net/netfilter/Makefile
76528+++ b/net/netfilter/Makefile
76529@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
76530 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76531 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76532 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76533+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76534 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76535 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76536 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
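The Kconfig and Makefile hunks above wire in the new xt_gradm match module; its source is added by the new-file diff that begins near the end of this excerpt. For orientation, this is the generic skeleton a minimal xtables match module takes on 3.1 — illustrative only, not the contents of xt_gradm.c, and the match body is a placeholder:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	/* placeholder: the real match compares the rule's option against
	 * the global "RBAC enabled" state */
	return true;
}

static struct xt_match gradm_mt_reg __read_mostly = {
	.name     = "gradm",
	.revision = 0,
	.family   = NFPROTO_UNSPEC,
	.match    = gradm_mt,
	.me       = THIS_MODULE,
};

static int __init gradm_mt_init(void)
{
	return xt_register_match(&gradm_mt_reg);
}

static void __exit gradm_mt_exit(void)
{
	xt_unregister_match(&gradm_mt_reg);
}

module_init(gradm_mt_init);
module_exit(gradm_mt_exit);
MODULE_LICENSE("GPL");

An iptables rule would then select the module with -m gradm; the exact rule options are defined by the real xt_gradm.c further down in the patch.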
76537diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76538index 12571fb..fb73976 100644
76539--- a/net/netfilter/ipvs/ip_vs_conn.c
76540+++ b/net/netfilter/ipvs/ip_vs_conn.c
76541@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
8308f9c9
MT
76542 /* Increase the refcnt counter of the dest */
76543 atomic_inc(&dest->refcnt);
76544
76545- conn_flags = atomic_read(&dest->conn_flags);
76546+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
76547 if (cp->protocol != IPPROTO_UDP)
76548 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76549 /* Bind with the destination and its corresponding transmitter */
fe2de317 76550@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
8308f9c9
MT
76551 atomic_set(&cp->refcnt, 1);
76552
76553 atomic_set(&cp->n_control, 0);
76554- atomic_set(&cp->in_pkts, 0);
76555+ atomic_set_unchecked(&cp->in_pkts, 0);
76556
66a7e928 76557 atomic_inc(&ipvs->conn_count);
8308f9c9 76558 if (flags & IP_VS_CONN_F_NO_CPORT)
fe2de317 76559@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
8308f9c9
MT
76560
76561 /* Don't drop the entry if its number of incoming packets is not
76562 located in [0, 8] */
76563- i = atomic_read(&cp->in_pkts);
76564+ i = atomic_read_unchecked(&cp->in_pkts);
76565 if (i > 8 || i < 0) return 0;
76566
76567 if (!todrop_rate[i]) return 0;
fe2de317
MT
76568diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76569index 4f77bb1..5d0bc26 100644
76570--- a/net/netfilter/ipvs/ip_vs_core.c
76571+++ b/net/netfilter/ipvs/ip_vs_core.c
76572@@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
66a7e928 76573 ret = cp->packet_xmit(skb, cp, pd->pp);
8308f9c9
MT
76574 /* do not touch skb anymore */
76575
76576- atomic_inc(&cp->in_pkts);
76577+ atomic_inc_unchecked(&cp->in_pkts);
76578 ip_vs_conn_put(cp);
76579 return ret;
76580 }
fe2de317 76581@@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
66a7e928
MT
76582 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76583 pkts = sysctl_sync_threshold(ipvs);
76584 else
76585- pkts = atomic_add_return(1, &cp->in_pkts);
76586+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76587
76588 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
8308f9c9 76589 cp->protocol == IPPROTO_SCTP) {
fe2de317
MT
76590diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76591index e3be48b..d658c8c 100644
76592--- a/net/netfilter/ipvs/ip_vs_ctl.c
76593+++ b/net/netfilter/ipvs/ip_vs_ctl.c
76594@@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
66a7e928
MT
76595 ip_vs_rs_hash(ipvs, dest);
76596 write_unlock_bh(&ipvs->rs_lock);
8308f9c9
MT
76597 }
76598- atomic_set(&dest->conn_flags, conn_flags);
76599+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
76600
76601 /* bind the service */
76602 if (!dest->svc) {
fe2de317 76603@@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
8308f9c9
MT
76604 " %-7s %-6d %-10d %-10d\n",
76605 &dest->addr.in6,
76606 ntohs(dest->port),
76607- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76608+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76609 atomic_read(&dest->weight),
76610 atomic_read(&dest->activeconns),
76611 atomic_read(&dest->inactconns));
fe2de317 76612@@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
8308f9c9
MT
76613 "%-7s %-6d %-10d %-10d\n",
76614 ntohl(dest->addr.ip),
76615 ntohs(dest->port),
76616- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76617+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76618 atomic_read(&dest->weight),
76619 atomic_read(&dest->activeconns),
76620 atomic_read(&dest->inactconns));
fe2de317 76621@@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
66a7e928 76622 struct ip_vs_dest_user_kern udest;
6e9df6a3 76623 struct netns_ipvs *ipvs = net_ipvs(net);
66a7e928
MT
76624
76625+ pax_track_stack();
76626+
76627 if (!capable(CAP_NET_ADMIN))
76628 return -EPERM;
76629
fe2de317 76630@@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
8308f9c9
MT
76631
76632 entry.addr = dest->addr.ip;
76633 entry.port = dest->port;
76634- entry.conn_flags = atomic_read(&dest->conn_flags);
76635+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76636 entry.weight = atomic_read(&dest->weight);
76637 entry.u_threshold = dest->u_threshold;
76638 entry.l_threshold = dest->l_threshold;
fe2de317 76639@@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
8308f9c9
MT
76640 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76641
76642 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76643- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76644+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76645 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76646 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76647 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
fe2de317
MT
76648diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76649index 3cdd479..116afa8 100644
76650--- a/net/netfilter/ipvs/ip_vs_sync.c
76651+++ b/net/netfilter/ipvs/ip_vs_sync.c
6e9df6a3 76652@@ -649,7 +649,7 @@ control:
66a7e928
MT
76653 * i.e only increment in_pkts for Templates.
76654 */
76655 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76656- int pkts = atomic_add_return(1, &cp->in_pkts);
76657+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76658
76659 if (pkts % sysctl_sync_period(ipvs) != 1)
76660 return;
fe2de317 76661@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
66a7e928
MT
76662
76663 if (opt)
76664 memcpy(&cp->in_seq, opt, sizeof(*opt));
76665- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76666+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76667 cp->state = state;
76668 cp->old_state = cp->state;
76669 /*
fe2de317
MT
76670diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76671index ee319a4..8a285ee 100644
76672--- a/net/netfilter/ipvs/ip_vs_xmit.c
76673+++ b/net/netfilter/ipvs/ip_vs_xmit.c
76674@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
8308f9c9
MT
76675 else
76676 rc = NF_ACCEPT;
76677 /* do not touch skb anymore */
76678- atomic_inc(&cp->in_pkts);
76679+ atomic_inc_unchecked(&cp->in_pkts);
76680 goto out;
76681 }
76682
fe2de317 76683@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
8308f9c9
MT
76684 else
76685 rc = NF_ACCEPT;
76686 /* do not touch skb anymore */
76687- atomic_inc(&cp->in_pkts);
76688+ atomic_inc_unchecked(&cp->in_pkts);
76689 goto out;
76690 }
76691
fe2de317
MT
76692diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76693index 2d8158a..5dca296 100644
76694--- a/net/netfilter/nfnetlink_log.c
76695+++ b/net/netfilter/nfnetlink_log.c
8308f9c9
MT
76696@@ -70,7 +70,7 @@ struct nfulnl_instance {
76697 };
76698
76699 static DEFINE_SPINLOCK(instances_lock);
76700-static atomic_t global_seq;
76701+static atomic_unchecked_t global_seq;
76702
76703 #define INSTANCE_BUCKETS 16
76704 static struct hlist_head instance_table[INSTANCE_BUCKETS];
fe2de317 76705@@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_instance *inst,
8308f9c9
MT
76706 /* global sequence number */
76707 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76708 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76709- htonl(atomic_inc_return(&global_seq)));
76710+ htonl(atomic_inc_return_unchecked(&global_seq)));
76711
76712 if (data_len) {
76713 struct nlattr *nla;
fe2de317
MT
76714diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76715new file mode 100644
76716index 0000000..6905327
76717--- /dev/null
76718+++ b/net/netfilter/xt_gradm.c
6892158b
MT
76719@@ -0,0 +1,51 @@
76720+/*
76721+ * gradm match for netfilter
76722