]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-3.2.2-201201302345.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.2.2-201201302345.patch
CommitLineData
add64c76
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index dfa6fc6..0095943 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9+*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13@@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17+*.gmo
18 *.grep
19 *.grp
20 *.gz
21@@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25+*.vim
26 *.xml
27 *.xz
28 *_MODULES
29+*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33@@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37+PERF*
38 SCCS
39 System.map*
40 TAGS
41@@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45+builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51+clut_vga16.c
52+common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59+config.c
60 config.mak
61 config.mak.autogen
62+config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66@@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70+exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74@@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78+gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90@@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103-linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107@@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111-media
112 mconf
113+mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120+mkpiggy
121 mkprep
122 mkregtable
123 mktables
124@@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128+regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132@@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152+vmlinux.bin.bz2
153 vmlinux.lds
154+vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158@@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zconf.lex.c
169 zoffset.h
170diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171index 81c287f..d456d02 100644
172--- a/Documentation/kernel-parameters.txt
173+++ b/Documentation/kernel-parameters.txt
174@@ -1935,6 +1935,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179+ virtualization environments that don't cope well with the
180+ expand down segment used by UDEREF on X86-32 or the frequent
181+ page table updates on X86-64.
182+
183+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184+
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188diff --git a/Makefile b/Makefile
189index 2f684da..bf21f8d 100644
190--- a/Makefile
191+++ b/Makefile
192@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197-HOSTCXXFLAGS = -O2
198+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208-PHONY += scripts_basic
209-scripts_basic:
210+PHONY += scripts_basic gcc-plugins
211+scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215@@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219+ifndef DISABLE_PAX_PLUGINS
220+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223+endif
224+ifdef CONFIG_PAX_MEMORY_STACKLEAK
225+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227+endif
228+ifdef CONFIG_KALLOCSTAT_PLUGIN
229+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230+endif
231+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234+endif
235+ifdef CONFIG_CHECKER_PLUGIN
236+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238+endif
239+endif
240+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242+ifeq ($(KBUILD_EXTMOD),)
243+gcc-plugins:
244+ $(Q)$(MAKE) $(build)=tools/gcc
245+else
246+gcc-plugins: ;
247+endif
248+else
249+gcc-plugins:
250+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252+else
253+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254+endif
255+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256+endif
257+endif
258+
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262@@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271@@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279@@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283-$(vmlinux-dirs): prepare scripts
284+$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288@@ -985,6 +1027,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=.
290
291 # All the preparing..
292+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296@@ -1086,6 +1129,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304@@ -1101,7 +1145,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308-modules_prepare: prepare scripts
309+modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313@@ -1198,6 +1242,7 @@ distclean: mrproper
314 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
315 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
316 -o -name '.*.rej' \
317+ -o -name '.*.rej' -o -name '*.so' \
318 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
319 -type f -print | xargs rm -f
320
321@@ -1358,6 +1403,7 @@ PHONY += $(module-dirs) modules
322 $(module-dirs): crmodverdir $(objtree)/Module.symvers
323 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
324
325+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
326 modules: $(module-dirs)
327 @$(kecho) ' Building modules, stage 2.';
328 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
329@@ -1484,17 +1530,19 @@ else
330 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
331 endif
332
333-%.s: %.c prepare scripts FORCE
334+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
335+%.s: %.c gcc-plugins prepare scripts FORCE
336 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
337 %.i: %.c prepare scripts FORCE
338 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
339-%.o: %.c prepare scripts FORCE
340+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
341+%.o: %.c gcc-plugins prepare scripts FORCE
342 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
343 %.lst: %.c prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345-%.s: %.S prepare scripts FORCE
346+%.s: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348-%.o: %.S prepare scripts FORCE
349+%.o: %.S gcc-plugins prepare scripts FORCE
350 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
351 %.symtypes: %.c prepare scripts FORCE
352 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
353@@ -1504,11 +1552,13 @@ endif
354 $(cmd_crmodverdir)
355 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
356 $(build)=$(build-dir)
357-%/: prepare scripts FORCE
358+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
359+%/: gcc-plugins prepare scripts FORCE
360 $(cmd_crmodverdir)
361 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
362 $(build)=$(build-dir)
363-%.ko: prepare scripts FORCE
364+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
365+%.ko: gcc-plugins prepare scripts FORCE
366 $(cmd_crmodverdir)
367 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
368 $(build)=$(build-dir) $(@:.ko=.o)
369diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
370index da5449e..7418343 100644
371--- a/arch/alpha/include/asm/elf.h
372+++ b/arch/alpha/include/asm/elf.h
373@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
374
375 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
376
377+#ifdef CONFIG_PAX_ASLR
378+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
379+
380+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
381+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
382+#endif
383+
384 /* $0 is set by ld.so to a pointer to a function which might be
385 registered using atexit. This provides a mean for the dynamic
386 linker to call DT_FINI functions for shared libraries that have
387diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
388index de98a73..bd4f1f8 100644
389--- a/arch/alpha/include/asm/pgtable.h
390+++ b/arch/alpha/include/asm/pgtable.h
391@@ -101,6 +101,17 @@ struct vm_area_struct;
392 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
393 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
394 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395+
396+#ifdef CONFIG_PAX_PAGEEXEC
397+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
398+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
399+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400+#else
401+# define PAGE_SHARED_NOEXEC PAGE_SHARED
402+# define PAGE_COPY_NOEXEC PAGE_COPY
403+# define PAGE_READONLY_NOEXEC PAGE_READONLY
404+#endif
405+
406 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
407
408 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
409diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
410index 2fd00b7..cfd5069 100644
411--- a/arch/alpha/kernel/module.c
412+++ b/arch/alpha/kernel/module.c
413@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
414
415 /* The small sections were sorted to the end of the segment.
416 The following should definitely cover them. */
417- gp = (u64)me->module_core + me->core_size - 0x8000;
418+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
419 got = sechdrs[me->arch.gotsecindex].sh_addr;
420
421 for (i = 0; i < n; i++) {
422diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
423index 01e8715..be0e80f 100644
424--- a/arch/alpha/kernel/osf_sys.c
425+++ b/arch/alpha/kernel/osf_sys.c
426@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
427 /* At this point: (!vma || addr < vma->vm_end). */
428 if (limit - len < addr)
429 return -ENOMEM;
430- if (!vma || addr + len <= vma->vm_start)
431+ if (check_heap_stack_gap(vma, addr, len))
432 return addr;
433 addr = vma->vm_end;
434 vma = vma->vm_next;
435@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
436 merely specific addresses, but regions of memory -- perhaps
437 this feature should be incorporated into all ports? */
438
439+#ifdef CONFIG_PAX_RANDMMAP
440+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
441+#endif
442+
443 if (addr) {
444 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
445 if (addr != (unsigned long) -ENOMEM)
446@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
447 }
448
449 /* Next, try allocating at TASK_UNMAPPED_BASE. */
450- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
451- len, limit);
452+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
453+
454 if (addr != (unsigned long) -ENOMEM)
455 return addr;
456
457diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
458index fadd5f8..904e73a 100644
459--- a/arch/alpha/mm/fault.c
460+++ b/arch/alpha/mm/fault.c
461@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
462 __reload_thread(pcb);
463 }
464
465+#ifdef CONFIG_PAX_PAGEEXEC
466+/*
467+ * PaX: decide what to do with offenders (regs->pc = fault address)
468+ *
469+ * returns 1 when task should be killed
470+ * 2 when patched PLT trampoline was detected
471+ * 3 when unpatched PLT trampoline was detected
472+ */
473+static int pax_handle_fetch_fault(struct pt_regs *regs)
474+{
475+
476+#ifdef CONFIG_PAX_EMUPLT
477+ int err;
478+
479+ do { /* PaX: patched PLT emulation #1 */
480+ unsigned int ldah, ldq, jmp;
481+
482+ err = get_user(ldah, (unsigned int *)regs->pc);
483+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
484+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
485+
486+ if (err)
487+ break;
488+
489+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
490+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
491+ jmp == 0x6BFB0000U)
492+ {
493+ unsigned long r27, addr;
494+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
495+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
496+
497+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
498+ err = get_user(r27, (unsigned long *)addr);
499+ if (err)
500+ break;
501+
502+ regs->r27 = r27;
503+ regs->pc = r27;
504+ return 2;
505+ }
506+ } while (0);
507+
508+ do { /* PaX: patched PLT emulation #2 */
509+ unsigned int ldah, lda, br;
510+
511+ err = get_user(ldah, (unsigned int *)regs->pc);
512+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
513+ err |= get_user(br, (unsigned int *)(regs->pc+8));
514+
515+ if (err)
516+ break;
517+
518+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
519+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
520+ (br & 0xFFE00000U) == 0xC3E00000U)
521+ {
522+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
523+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
524+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
525+
526+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
527+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
528+ return 2;
529+ }
530+ } while (0);
531+
532+ do { /* PaX: unpatched PLT emulation */
533+ unsigned int br;
534+
535+ err = get_user(br, (unsigned int *)regs->pc);
536+
537+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
538+ unsigned int br2, ldq, nop, jmp;
539+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
540+
541+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
542+ err = get_user(br2, (unsigned int *)addr);
543+ err |= get_user(ldq, (unsigned int *)(addr+4));
544+ err |= get_user(nop, (unsigned int *)(addr+8));
545+ err |= get_user(jmp, (unsigned int *)(addr+12));
546+ err |= get_user(resolver, (unsigned long *)(addr+16));
547+
548+ if (err)
549+ break;
550+
551+ if (br2 == 0xC3600000U &&
552+ ldq == 0xA77B000CU &&
553+ nop == 0x47FF041FU &&
554+ jmp == 0x6B7B0000U)
555+ {
556+ regs->r28 = regs->pc+4;
557+ regs->r27 = addr+16;
558+ regs->pc = resolver;
559+ return 3;
560+ }
561+ }
562+ } while (0);
563+#endif
564+
565+ return 1;
566+}
567+
568+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
569+{
570+ unsigned long i;
571+
572+ printk(KERN_ERR "PAX: bytes at PC: ");
573+ for (i = 0; i < 5; i++) {
574+ unsigned int c;
575+ if (get_user(c, (unsigned int *)pc+i))
576+ printk(KERN_CONT "???????? ");
577+ else
578+ printk(KERN_CONT "%08x ", c);
579+ }
580+ printk("\n");
581+}
582+#endif
583
584 /*
585 * This routine handles page faults. It determines the address,
586@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
587 good_area:
588 si_code = SEGV_ACCERR;
589 if (cause < 0) {
590- if (!(vma->vm_flags & VM_EXEC))
591+ if (!(vma->vm_flags & VM_EXEC)) {
592+
593+#ifdef CONFIG_PAX_PAGEEXEC
594+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
595+ goto bad_area;
596+
597+ up_read(&mm->mmap_sem);
598+ switch (pax_handle_fetch_fault(regs)) {
599+
600+#ifdef CONFIG_PAX_EMUPLT
601+ case 2:
602+ case 3:
603+ return;
604+#endif
605+
606+ }
607+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
608+ do_group_exit(SIGKILL);
609+#else
610 goto bad_area;
611+#endif
612+
613+ }
614 } else if (!cause) {
615 /* Allow reads even for write-only mappings */
616 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
617diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
618index 86976d0..8a57797 100644
619--- a/arch/arm/include/asm/atomic.h
620+++ b/arch/arm/include/asm/atomic.h
621@@ -239,6 +239,14 @@ typedef struct {
622 u64 __aligned(8) counter;
623 } atomic64_t;
624
625+#ifdef CONFIG_PAX_REFCOUNT
626+typedef struct {
627+ u64 __aligned(8) counter;
628+} atomic64_unchecked_t;
629+#else
630+typedef atomic64_t atomic64_unchecked_t;
631+#endif
632+
633 #define ATOMIC64_INIT(i) { (i) }
634
635 static inline u64 atomic64_read(atomic64_t *v)
636diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
637index 0e9ce8d..6ef1e03 100644
638--- a/arch/arm/include/asm/elf.h
639+++ b/arch/arm/include/asm/elf.h
640@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
641 the loader. We need to make sure that it is out of the way of the program
642 that it will "exec", and that there is sufficient room for the brk. */
643
644-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
645+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
646+
647+#ifdef CONFIG_PAX_ASLR
648+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
649+
650+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
651+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652+#endif
653
654 /* When the program starts, a1 contains a pointer to a function to be
655 registered with atexit, as per the SVR4 ABI. A value of 0 means we
656@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 extern void elf_set_personality(const struct elf32_hdr *);
658 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
659
660-struct mm_struct;
661-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
662-#define arch_randomize_brk arch_randomize_brk
663-
664 extern int vectors_user_mapping(void);
665 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
666 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
667diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
668index e51b1e8..32a3113 100644
669--- a/arch/arm/include/asm/kmap_types.h
670+++ b/arch/arm/include/asm/kmap_types.h
671@@ -21,6 +21,7 @@ enum km_type {
672 KM_L1_CACHE,
673 KM_L2_CACHE,
674 KM_KDB,
675+ KM_CLEARPAGE,
676 KM_TYPE_NR
677 };
678
679diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
680index b293616..96310e5 100644
681--- a/arch/arm/include/asm/uaccess.h
682+++ b/arch/arm/include/asm/uaccess.h
683@@ -22,6 +22,8 @@
684 #define VERIFY_READ 0
685 #define VERIFY_WRITE 1
686
687+extern void check_object_size(const void *ptr, unsigned long n, bool to);
688+
689 /*
690 * The exception table consists of pairs of addresses: the first is the
691 * address of an instruction that is allowed to fault, and the second is
692@@ -387,8 +389,23 @@ do { \
693
694
695 #ifdef CONFIG_MMU
696-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
697-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
698+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
699+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
700+
701+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
702+{
703+ if (!__builtin_constant_p(n))
704+ check_object_size(to, n, false);
705+ return ___copy_from_user(to, from, n);
706+}
707+
708+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
709+{
710+ if (!__builtin_constant_p(n))
711+ check_object_size(from, n, true);
712+ return ___copy_to_user(to, from, n);
713+}
714+
715 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
716 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
717 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
718@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
719
720 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
721 {
722+ if ((long)n < 0)
723+ return n;
724+
725 if (access_ok(VERIFY_READ, from, n))
726 n = __copy_from_user(to, from, n);
727 else /* security hole - plug it */
728@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
729
730 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
731 {
732+ if ((long)n < 0)
733+ return n;
734+
735 if (access_ok(VERIFY_WRITE, to, n))
736 n = __copy_to_user(to, from, n);
737 return n;
738diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
739index 5b0bce6..becd81c 100644
740--- a/arch/arm/kernel/armksyms.c
741+++ b/arch/arm/kernel/armksyms.c
742@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
743 #ifdef CONFIG_MMU
744 EXPORT_SYMBOL(copy_page);
745
746-EXPORT_SYMBOL(__copy_from_user);
747-EXPORT_SYMBOL(__copy_to_user);
748+EXPORT_SYMBOL(___copy_from_user);
749+EXPORT_SYMBOL(___copy_to_user);
750 EXPORT_SYMBOL(__clear_user);
751
752 EXPORT_SYMBOL(__get_user_1);
753diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
754index 3d0c6fb..3dcae52 100644
755--- a/arch/arm/kernel/process.c
756+++ b/arch/arm/kernel/process.c
757@@ -28,7 +28,6 @@
758 #include <linux/tick.h>
759 #include <linux/utsname.h>
760 #include <linux/uaccess.h>
761-#include <linux/random.h>
762 #include <linux/hw_breakpoint.h>
763 #include <linux/cpuidle.h>
764
765@@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
766 return 0;
767 }
768
769-unsigned long arch_randomize_brk(struct mm_struct *mm)
770-{
771- unsigned long range_end = mm->brk + 0x02000000;
772- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
773-}
774-
775 #ifdef CONFIG_MMU
776 /*
777 * The vectors page is always readable from user space for the
778diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
779index 99a5727..a3d5bb1 100644
780--- a/arch/arm/kernel/traps.c
781+++ b/arch/arm/kernel/traps.c
782@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
783
784 static DEFINE_RAW_SPINLOCK(die_lock);
785
786+extern void gr_handle_kernel_exploit(void);
787+
788 /*
789 * This function is protected against re-entrancy.
790 */
791@@ -288,6 +290,9 @@ void die(const char *str, struct pt_regs *regs, int err)
792 panic("Fatal exception in interrupt");
793 if (panic_on_oops)
794 panic("Fatal exception");
795+
796+ gr_handle_kernel_exploit();
797+
798 if (ret != NOTIFY_STOP)
799 do_exit(SIGSEGV);
800 }
801diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
802index 66a477a..bee61d3 100644
803--- a/arch/arm/lib/copy_from_user.S
804+++ b/arch/arm/lib/copy_from_user.S
805@@ -16,7 +16,7 @@
806 /*
807 * Prototype:
808 *
809- * size_t __copy_from_user(void *to, const void *from, size_t n)
810+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
811 *
812 * Purpose:
813 *
814@@ -84,11 +84,11 @@
815
816 .text
817
818-ENTRY(__copy_from_user)
819+ENTRY(___copy_from_user)
820
821 #include "copy_template.S"
822
823-ENDPROC(__copy_from_user)
824+ENDPROC(___copy_from_user)
825
826 .pushsection .fixup,"ax"
827 .align 0
828diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
829index d066df6..df28194 100644
830--- a/arch/arm/lib/copy_to_user.S
831+++ b/arch/arm/lib/copy_to_user.S
832@@ -16,7 +16,7 @@
833 /*
834 * Prototype:
835 *
836- * size_t __copy_to_user(void *to, const void *from, size_t n)
837+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
838 *
839 * Purpose:
840 *
841@@ -88,11 +88,11 @@
842 .text
843
844 ENTRY(__copy_to_user_std)
845-WEAK(__copy_to_user)
846+WEAK(___copy_to_user)
847
848 #include "copy_template.S"
849
850-ENDPROC(__copy_to_user)
851+ENDPROC(___copy_to_user)
852 ENDPROC(__copy_to_user_std)
853
854 .pushsection .fixup,"ax"
855diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
856index d0ece2a..5ae2f39 100644
857--- a/arch/arm/lib/uaccess.S
858+++ b/arch/arm/lib/uaccess.S
859@@ -20,7 +20,7 @@
860
861 #define PAGE_SHIFT 12
862
863-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
864+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
865 * Purpose : copy a block to user memory from kernel memory
866 * Params : to - user memory
867 * : from - kernel memory
868@@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
869 sub r2, r2, ip
870 b .Lc2u_dest_aligned
871
872-ENTRY(__copy_to_user)
873+ENTRY(___copy_to_user)
874 stmfd sp!, {r2, r4 - r7, lr}
875 cmp r2, #4
876 blt .Lc2u_not_enough
877@@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
878 ldrgtb r3, [r1], #0
879 USER( T(strgtb) r3, [r0], #1) @ May fault
880 b .Lc2u_finished
881-ENDPROC(__copy_to_user)
882+ENDPROC(___copy_to_user)
883
884 .pushsection .fixup,"ax"
885 .align 0
886 9001: ldmfd sp!, {r0, r4 - r7, pc}
887 .popsection
888
889-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
890+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
891 * Purpose : copy a block from user memory to kernel memory
892 * Params : to - kernel memory
893 * : from - user memory
894@@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
895 sub r2, r2, ip
896 b .Lcfu_dest_aligned
897
898-ENTRY(__copy_from_user)
899+ENTRY(___copy_from_user)
900 stmfd sp!, {r0, r2, r4 - r7, lr}
901 cmp r2, #4
902 blt .Lcfu_not_enough
903@@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
904 USER( T(ldrgtb) r3, [r1], #1) @ May fault
905 strgtb r3, [r0], #1
906 b .Lcfu_finished
907-ENDPROC(__copy_from_user)
908+ENDPROC(___copy_from_user)
909
910 .pushsection .fixup,"ax"
911 .align 0
912diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
913index 025f742..8432b08 100644
914--- a/arch/arm/lib/uaccess_with_memcpy.c
915+++ b/arch/arm/lib/uaccess_with_memcpy.c
916@@ -104,7 +104,7 @@ out:
917 }
918
919 unsigned long
920-__copy_to_user(void __user *to, const void *from, unsigned long n)
921+___copy_to_user(void __user *to, const void *from, unsigned long n)
922 {
923 /*
924 * This test is stubbed out of the main function above to keep
925diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
926index 2b2d51c..0127490 100644
927--- a/arch/arm/mach-ux500/mbox-db5500.c
928+++ b/arch/arm/mach-ux500/mbox-db5500.c
929@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
930 return sprintf(buf, "0x%X\n", mbox_value);
931 }
932
933-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
934+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935
936 static int mbox_show(struct seq_file *s, void *data)
937 {
938diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
939index aa33949..b242a2f 100644
940--- a/arch/arm/mm/fault.c
941+++ b/arch/arm/mm/fault.c
942@@ -183,6 +183,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
943 }
944 #endif
945
946+#ifdef CONFIG_PAX_PAGEEXEC
947+ if (fsr & FSR_LNX_PF) {
948+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
949+ do_group_exit(SIGKILL);
950+ }
951+#endif
952+
953 tsk->thread.address = addr;
954 tsk->thread.error_code = fsr;
955 tsk->thread.trap_no = 14;
956@@ -384,6 +391,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
957 }
958 #endif /* CONFIG_MMU */
959
960+#ifdef CONFIG_PAX_PAGEEXEC
961+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
962+{
963+ long i;
964+
965+ printk(KERN_ERR "PAX: bytes at PC: ");
966+ for (i = 0; i < 20; i++) {
967+ unsigned char c;
968+ if (get_user(c, (__force unsigned char __user *)pc+i))
969+ printk(KERN_CONT "?? ");
970+ else
971+ printk(KERN_CONT "%02x ", c);
972+ }
973+ printk("\n");
974+
975+ printk(KERN_ERR "PAX: bytes at SP-4: ");
976+ for (i = -1; i < 20; i++) {
977+ unsigned long c;
978+ if (get_user(c, (__force unsigned long __user *)sp+i))
979+ printk(KERN_CONT "???????? ");
980+ else
981+ printk(KERN_CONT "%08lx ", c);
982+ }
983+ printk("\n");
984+}
985+#endif
986+
987 /*
988 * First Level Translation Fault Handler
989 *
990diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
991index 44b628e..623ee2a 100644
992--- a/arch/arm/mm/mmap.c
993+++ b/arch/arm/mm/mmap.c
994@@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
995 if (len > TASK_SIZE)
996 return -ENOMEM;
997
998+#ifdef CONFIG_PAX_RANDMMAP
999+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1000+#endif
1001+
1002 if (addr) {
1003 if (do_align)
1004 addr = COLOUR_ALIGN(addr, pgoff);
1005@@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1006 addr = PAGE_ALIGN(addr);
1007
1008 vma = find_vma(mm, addr);
1009- if (TASK_SIZE - len >= addr &&
1010- (!vma || addr + len <= vma->vm_start))
1011+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1012 return addr;
1013 }
1014 if (len > mm->cached_hole_size) {
1015- start_addr = addr = mm->free_area_cache;
1016+ start_addr = addr = mm->free_area_cache;
1017 } else {
1018- start_addr = addr = TASK_UNMAPPED_BASE;
1019- mm->cached_hole_size = 0;
1020+ start_addr = addr = mm->mmap_base;
1021+ mm->cached_hole_size = 0;
1022 }
1023 /* 8 bits of randomness in 20 address space bits */
1024 if ((current->flags & PF_RANDOMIZE) &&
1025@@ -89,14 +92,14 @@ full_search:
1026 * Start a new search - just in case we missed
1027 * some holes.
1028 */
1029- if (start_addr != TASK_UNMAPPED_BASE) {
1030- start_addr = addr = TASK_UNMAPPED_BASE;
1031+ if (start_addr != mm->mmap_base) {
1032+ start_addr = addr = mm->mmap_base;
1033 mm->cached_hole_size = 0;
1034 goto full_search;
1035 }
1036 return -ENOMEM;
1037 }
1038- if (!vma || addr + len <= vma->vm_start) {
1039+ if (check_heap_stack_gap(vma, addr, len)) {
1040 /*
1041 * Remember the place where we stopped the search:
1042 */
1043diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1044index 3b3159b..425ea94 100644
1045--- a/arch/avr32/include/asm/elf.h
1046+++ b/arch/avr32/include/asm/elf.h
1047@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1048 the loader. We need to make sure that it is out of the way of the program
1049 that it will "exec", and that there is sufficient room for the brk. */
1050
1051-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1052+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1053
1054+#ifdef CONFIG_PAX_ASLR
1055+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1056+
1057+#define PAX_DELTA_MMAP_LEN 15
1058+#define PAX_DELTA_STACK_LEN 15
1059+#endif
1060
1061 /* This yields a mask that user programs can use to figure out what
1062 instruction set this CPU supports. This could be done in user space,
1063diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1064index b7f5c68..556135c 100644
1065--- a/arch/avr32/include/asm/kmap_types.h
1066+++ b/arch/avr32/include/asm/kmap_types.h
1067@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1068 D(11) KM_IRQ1,
1069 D(12) KM_SOFTIRQ0,
1070 D(13) KM_SOFTIRQ1,
1071-D(14) KM_TYPE_NR
1072+D(14) KM_CLEARPAGE,
1073+D(15) KM_TYPE_NR
1074 };
1075
1076 #undef D
1077diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1078index f7040a1..db9f300 100644
1079--- a/arch/avr32/mm/fault.c
1080+++ b/arch/avr32/mm/fault.c
1081@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1082
1083 int exception_trace = 1;
1084
1085+#ifdef CONFIG_PAX_PAGEEXEC
1086+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1087+{
1088+ unsigned long i;
1089+
1090+ printk(KERN_ERR "PAX: bytes at PC: ");
1091+ for (i = 0; i < 20; i++) {
1092+ unsigned char c;
1093+ if (get_user(c, (unsigned char *)pc+i))
1094+ printk(KERN_CONT "???????? ");
1095+ else
1096+ printk(KERN_CONT "%02x ", c);
1097+ }
1098+ printk("\n");
1099+}
1100+#endif
1101+
1102 /*
1103 * This routine handles page faults. It determines the address and the
1104 * problem, and then passes it off to one of the appropriate routines.
1105@@ -156,6 +173,16 @@ bad_area:
1106 up_read(&mm->mmap_sem);
1107
1108 if (user_mode(regs)) {
1109+
1110+#ifdef CONFIG_PAX_PAGEEXEC
1111+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1112+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1113+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1114+ do_group_exit(SIGKILL);
1115+ }
1116+ }
1117+#endif
1118+
1119 if (exception_trace && printk_ratelimit())
1120 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1121 "sp %08lx ecr %lu\n",
1122diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1123index f8e16b2..c73ff79 100644
1124--- a/arch/frv/include/asm/kmap_types.h
1125+++ b/arch/frv/include/asm/kmap_types.h
1126@@ -23,6 +23,7 @@ enum km_type {
1127 KM_IRQ1,
1128 KM_SOFTIRQ0,
1129 KM_SOFTIRQ1,
1130+ KM_CLEARPAGE,
1131 KM_TYPE_NR
1132 };
1133
1134diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1135index 385fd30..6c3d97e 100644
1136--- a/arch/frv/mm/elf-fdpic.c
1137+++ b/arch/frv/mm/elf-fdpic.c
1138@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1139 if (addr) {
1140 addr = PAGE_ALIGN(addr);
1141 vma = find_vma(current->mm, addr);
1142- if (TASK_SIZE - len >= addr &&
1143- (!vma || addr + len <= vma->vm_start))
1144+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1145 goto success;
1146 }
1147
1148@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1149 for (; vma; vma = vma->vm_next) {
1150 if (addr > limit)
1151 break;
1152- if (addr + len <= vma->vm_start)
1153+ if (check_heap_stack_gap(vma, addr, len))
1154 goto success;
1155 addr = vma->vm_end;
1156 }
1157@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1158 for (; vma; vma = vma->vm_next) {
1159 if (addr > limit)
1160 break;
1161- if (addr + len <= vma->vm_start)
1162+ if (check_heap_stack_gap(vma, addr, len))
1163 goto success;
1164 addr = vma->vm_end;
1165 }
1166diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1167index b5298eb..67c6e62 100644
1168--- a/arch/ia64/include/asm/elf.h
1169+++ b/arch/ia64/include/asm/elf.h
1170@@ -42,6 +42,13 @@
1171 */
1172 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1173
1174+#ifdef CONFIG_PAX_ASLR
1175+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1176+
1177+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1178+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179+#endif
1180+
1181 #define PT_IA_64_UNWIND 0x70000001
1182
1183 /* IA-64 relocations: */
1184diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1185index 1a97af3..7529d31 100644
1186--- a/arch/ia64/include/asm/pgtable.h
1187+++ b/arch/ia64/include/asm/pgtable.h
1188@@ -12,7 +12,7 @@
1189 * David Mosberger-Tang <davidm@hpl.hp.com>
1190 */
1191
1192-
1193+#include <linux/const.h>
1194 #include <asm/mman.h>
1195 #include <asm/page.h>
1196 #include <asm/processor.h>
1197@@ -143,6 +143,17 @@
1198 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1199 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1201+
1202+#ifdef CONFIG_PAX_PAGEEXEC
1203+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1204+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1205+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206+#else
1207+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1208+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1209+# define PAGE_COPY_NOEXEC PAGE_COPY
1210+#endif
1211+
1212 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1213 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1214 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1215diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1216index b77768d..e0795eb 100644
1217--- a/arch/ia64/include/asm/spinlock.h
1218+++ b/arch/ia64/include/asm/spinlock.h
1219@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1220 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1221
1222 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1223- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1224+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1225 }
1226
1227 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1228diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1229index 449c8c0..432a3d2 100644
1230--- a/arch/ia64/include/asm/uaccess.h
1231+++ b/arch/ia64/include/asm/uaccess.h
1232@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1233 const void *__cu_from = (from); \
1234 long __cu_len = (n); \
1235 \
1236- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1237+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1238 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1239 __cu_len; \
1240 })
1241@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1242 long __cu_len = (n); \
1243 \
1244 __chk_user_ptr(__cu_from); \
1245- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1246+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1247 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1248 __cu_len; \
1249 })
1250diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1251index 24603be..948052d 100644
1252--- a/arch/ia64/kernel/module.c
1253+++ b/arch/ia64/kernel/module.c
1254@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1255 void
1256 module_free (struct module *mod, void *module_region)
1257 {
1258- if (mod && mod->arch.init_unw_table &&
1259- module_region == mod->module_init) {
1260+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1261 unw_remove_unwind_table(mod->arch.init_unw_table);
1262 mod->arch.init_unw_table = NULL;
1263 }
1264@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1265 }
1266
1267 static inline int
1268+in_init_rx (const struct module *mod, uint64_t addr)
1269+{
1270+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1271+}
1272+
1273+static inline int
1274+in_init_rw (const struct module *mod, uint64_t addr)
1275+{
1276+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1277+}
1278+
1279+static inline int
1280 in_init (const struct module *mod, uint64_t addr)
1281 {
1282- return addr - (uint64_t) mod->module_init < mod->init_size;
1283+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1284+}
1285+
1286+static inline int
1287+in_core_rx (const struct module *mod, uint64_t addr)
1288+{
1289+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1290+}
1291+
1292+static inline int
1293+in_core_rw (const struct module *mod, uint64_t addr)
1294+{
1295+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1296 }
1297
1298 static inline int
1299 in_core (const struct module *mod, uint64_t addr)
1300 {
1301- return addr - (uint64_t) mod->module_core < mod->core_size;
1302+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1303 }
1304
1305 static inline int
1306@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1307 break;
1308
1309 case RV_BDREL:
1310- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1311+ if (in_init_rx(mod, val))
1312+ val -= (uint64_t) mod->module_init_rx;
1313+ else if (in_init_rw(mod, val))
1314+ val -= (uint64_t) mod->module_init_rw;
1315+ else if (in_core_rx(mod, val))
1316+ val -= (uint64_t) mod->module_core_rx;
1317+ else if (in_core_rw(mod, val))
1318+ val -= (uint64_t) mod->module_core_rw;
1319 break;
1320
1321 case RV_LTV:
1322@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1323 * addresses have been selected...
1324 */
1325 uint64_t gp;
1326- if (mod->core_size > MAX_LTOFF)
1327+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1328 /*
1329 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1330 * at the end of the module.
1331 */
1332- gp = mod->core_size - MAX_LTOFF / 2;
1333+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1334 else
1335- gp = mod->core_size / 2;
1336- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1337+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1338+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1339 mod->arch.gp = gp;
1340 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1341 }
1342diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1343index 609d500..7dde2a8 100644
1344--- a/arch/ia64/kernel/sys_ia64.c
1345+++ b/arch/ia64/kernel/sys_ia64.c
1346@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1347 if (REGION_NUMBER(addr) == RGN_HPAGE)
1348 addr = 0;
1349 #endif
1350+
1351+#ifdef CONFIG_PAX_RANDMMAP
1352+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1353+ addr = mm->free_area_cache;
1354+ else
1355+#endif
1356+
1357 if (!addr)
1358 addr = mm->free_area_cache;
1359
1360@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1361 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1362 /* At this point: (!vma || addr < vma->vm_end). */
1363 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1364- if (start_addr != TASK_UNMAPPED_BASE) {
1365+ if (start_addr != mm->mmap_base) {
1366 /* Start a new search --- just in case we missed some holes. */
1367- addr = TASK_UNMAPPED_BASE;
1368+ addr = mm->mmap_base;
1369 goto full_search;
1370 }
1371 return -ENOMEM;
1372 }
1373- if (!vma || addr + len <= vma->vm_start) {
1374+ if (check_heap_stack_gap(vma, addr, len)) {
1375 /* Remember the address where we stopped this search: */
1376 mm->free_area_cache = addr + len;
1377 return addr;
1378diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1379index 53c0ba0..2accdde 100644
1380--- a/arch/ia64/kernel/vmlinux.lds.S
1381+++ b/arch/ia64/kernel/vmlinux.lds.S
1382@@ -199,7 +199,7 @@ SECTIONS {
1383 /* Per-cpu data: */
1384 . = ALIGN(PERCPU_PAGE_SIZE);
1385 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1386- __phys_per_cpu_start = __per_cpu_load;
1387+ __phys_per_cpu_start = per_cpu_load;
1388 /*
1389 * ensure percpu data fits
1390 * into percpu page size
1391diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1392index 20b3593..1ce77f0 100644
1393--- a/arch/ia64/mm/fault.c
1394+++ b/arch/ia64/mm/fault.c
1395@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1396 return pte_present(pte);
1397 }
1398
1399+#ifdef CONFIG_PAX_PAGEEXEC
1400+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1401+{
1402+ unsigned long i;
1403+
1404+ printk(KERN_ERR "PAX: bytes at PC: ");
1405+ for (i = 0; i < 8; i++) {
1406+ unsigned int c;
1407+ if (get_user(c, (unsigned int *)pc+i))
1408+ printk(KERN_CONT "???????? ");
1409+ else
1410+ printk(KERN_CONT "%08x ", c);
1411+ }
1412+ printk("\n");
1413+}
1414+#endif
1415+
1416 void __kprobes
1417 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1418 {
1419@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1420 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1421 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1422
1423- if ((vma->vm_flags & mask) != mask)
1424+ if ((vma->vm_flags & mask) != mask) {
1425+
1426+#ifdef CONFIG_PAX_PAGEEXEC
1427+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1428+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1429+ goto bad_area;
1430+
1431+ up_read(&mm->mmap_sem);
1432+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1433+ do_group_exit(SIGKILL);
1434+ }
1435+#endif
1436+
1437 goto bad_area;
1438
1439+ }
1440+
1441 /*
1442 * If for any reason at all we couldn't handle the fault, make
1443 * sure we exit gracefully rather than endlessly redo the
1444diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1445index 5ca674b..e0e1b70 100644
1446--- a/arch/ia64/mm/hugetlbpage.c
1447+++ b/arch/ia64/mm/hugetlbpage.c
1448@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1449 /* At this point: (!vmm || addr < vmm->vm_end). */
1450 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1451 return -ENOMEM;
1452- if (!vmm || (addr + len) <= vmm->vm_start)
1453+ if (check_heap_stack_gap(vmm, addr, len))
1454 return addr;
1455 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1456 }
1457diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1458index 00cb0e2..2ad8024 100644
1459--- a/arch/ia64/mm/init.c
1460+++ b/arch/ia64/mm/init.c
1461@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1462 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1463 vma->vm_end = vma->vm_start + PAGE_SIZE;
1464 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1465+
1466+#ifdef CONFIG_PAX_PAGEEXEC
1467+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1468+ vma->vm_flags &= ~VM_EXEC;
1469+
1470+#ifdef CONFIG_PAX_MPROTECT
1471+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1472+ vma->vm_flags &= ~VM_MAYEXEC;
1473+#endif
1474+
1475+ }
1476+#endif
1477+
1478 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1479 down_write(&current->mm->mmap_sem);
1480 if (insert_vm_struct(current->mm, vma)) {
1481diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1482index 82abd15..d95ae5d 100644
1483--- a/arch/m32r/lib/usercopy.c
1484+++ b/arch/m32r/lib/usercopy.c
1485@@ -14,6 +14,9 @@
1486 unsigned long
1487 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1488 {
1489+ if ((long)n < 0)
1490+ return n;
1491+
1492 prefetch(from);
1493 if (access_ok(VERIFY_WRITE, to, n))
1494 __copy_user(to,from,n);
1495@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1496 unsigned long
1497 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1498 {
1499+ if ((long)n < 0)
1500+ return n;
1501+
1502 prefetchw(to);
1503 if (access_ok(VERIFY_READ, from, n))
1504 __copy_user_zeroing(to,from,n);
1505diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1506index 455c0ac..ad65fbe 100644
1507--- a/arch/mips/include/asm/elf.h
1508+++ b/arch/mips/include/asm/elf.h
1509@@ -372,13 +372,16 @@ extern const char *__elf_platform;
1510 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1511 #endif
1512
1513+#ifdef CONFIG_PAX_ASLR
1514+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1515+
1516+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1517+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518+#endif
1519+
1520 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1521 struct linux_binprm;
1522 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1523 int uses_interp);
1524
1525-struct mm_struct;
1526-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1527-#define arch_randomize_brk arch_randomize_brk
1528-
1529 #endif /* _ASM_ELF_H */
1530diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1531index e59cd1a..8e329d6 100644
1532--- a/arch/mips/include/asm/page.h
1533+++ b/arch/mips/include/asm/page.h
1534@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1535 #ifdef CONFIG_CPU_MIPS32
1536 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1537 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1538- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1539+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1540 #else
1541 typedef struct { unsigned long long pte; } pte_t;
1542 #define pte_val(x) ((x).pte)
1543diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1544index 6018c80..7c37203 100644
1545--- a/arch/mips/include/asm/system.h
1546+++ b/arch/mips/include/asm/system.h
1547@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1548 */
1549 #define __ARCH_WANT_UNLOCKED_CTXSW
1550
1551-extern unsigned long arch_align_stack(unsigned long sp);
1552+#define arch_align_stack(x) ((x) & ~0xfUL)
1553
1554 #endif /* _ASM_SYSTEM_H */
1555diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1556index 9fdd8bc..4bd7f1a 100644
1557--- a/arch/mips/kernel/binfmt_elfn32.c
1558+++ b/arch/mips/kernel/binfmt_elfn32.c
1559@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1560 #undef ELF_ET_DYN_BASE
1561 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1562
1563+#ifdef CONFIG_PAX_ASLR
1564+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1565+
1566+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1567+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568+#endif
1569+
1570 #include <asm/processor.h>
1571 #include <linux/module.h>
1572 #include <linux/elfcore.h>
1573diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1574index ff44823..97f8906 100644
1575--- a/arch/mips/kernel/binfmt_elfo32.c
1576+++ b/arch/mips/kernel/binfmt_elfo32.c
1577@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1578 #undef ELF_ET_DYN_BASE
1579 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1580
1581+#ifdef CONFIG_PAX_ASLR
1582+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1583+
1584+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1585+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586+#endif
1587+
1588 #include <asm/processor.h>
1589
1590 /*
1591diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1592index c47f96e..661d418 100644
1593--- a/arch/mips/kernel/process.c
1594+++ b/arch/mips/kernel/process.c
1595@@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1596 out:
1597 return pc;
1598 }
1599-
1600-/*
1601- * Don't forget that the stack pointer must be aligned on a 8 bytes
1602- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1603- */
1604-unsigned long arch_align_stack(unsigned long sp)
1605-{
1606- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1607- sp -= get_random_int() & ~PAGE_MASK;
1608-
1609- return sp & ALMASK;
1610-}
1611diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1612index 937cf33..adb39bb 100644
1613--- a/arch/mips/mm/fault.c
1614+++ b/arch/mips/mm/fault.c
1615@@ -28,6 +28,23 @@
1616 #include <asm/highmem.h> /* For VMALLOC_END */
1617 #include <linux/kdebug.h>
1618
1619+#ifdef CONFIG_PAX_PAGEEXEC
1620+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1621+{
1622+ unsigned long i;
1623+
1624+ printk(KERN_ERR "PAX: bytes at PC: ");
1625+ for (i = 0; i < 5; i++) {
1626+ unsigned int c;
1627+ if (get_user(c, (unsigned int *)pc+i))
1628+ printk(KERN_CONT "???????? ");
1629+ else
1630+ printk(KERN_CONT "%08x ", c);
1631+ }
1632+ printk("\n");
1633+}
1634+#endif
1635+
1636 /*
1637 * This routine handles page faults. It determines the address,
1638 * and the problem, and then passes it off to one of the appropriate
1639diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1640index 302d779..7d35bf8 100644
1641--- a/arch/mips/mm/mmap.c
1642+++ b/arch/mips/mm/mmap.c
1643@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1644 do_color_align = 1;
1645
1646 /* requesting a specific address */
1647+
1648+#ifdef CONFIG_PAX_RANDMMAP
1649+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1650+#endif
1651+
1652 if (addr) {
1653 if (do_color_align)
1654 addr = COLOUR_ALIGN(addr, pgoff);
1655@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1656 addr = PAGE_ALIGN(addr);
1657
1658 vma = find_vma(mm, addr);
1659- if (TASK_SIZE - len >= addr &&
1660- (!vma || addr + len <= vma->vm_start))
1661+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1662 return addr;
1663 }
1664
1665@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1666 /* At this point: (!vma || addr < vma->vm_end). */
1667 if (TASK_SIZE - len < addr)
1668 return -ENOMEM;
1669- if (!vma || addr + len <= vma->vm_start)
1670+ if (check_heap_stack_gap(vmm, addr, len))
1671 return addr;
1672 addr = vma->vm_end;
1673 if (do_color_align)
1674@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1675 /* make sure it can fit in the remaining address space */
1676 if (likely(addr > len)) {
1677 vma = find_vma(mm, addr - len);
1678- if (!vma || addr <= vma->vm_start) {
1679+ if (check_heap_stack_gap(vmm, addr - len, len))
1680 /* cache the address as a hint for next time */
1681 return mm->free_area_cache = addr - len;
1682 }
1683@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1684 * return with success:
1685 */
1686 vma = find_vma(mm, addr);
1687- if (likely(!vma || addr + len <= vma->vm_start)) {
1688+ if (check_heap_stack_gap(vmm, addr, len)) {
1689 /* cache the address as a hint for next time */
1690 return mm->free_area_cache = addr;
1691 }
1692@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1693 mm->unmap_area = arch_unmap_area_topdown;
1694 }
1695 }
1696-
1697-static inline unsigned long brk_rnd(void)
1698-{
1699- unsigned long rnd = get_random_int();
1700-
1701- rnd = rnd << PAGE_SHIFT;
1702- /* 8MB for 32bit, 256MB for 64bit */
1703- if (TASK_IS_32BIT_ADDR)
1704- rnd = rnd & 0x7ffffful;
1705- else
1706- rnd = rnd & 0xffffffful;
1707-
1708- return rnd;
1709-}
1710-
1711-unsigned long arch_randomize_brk(struct mm_struct *mm)
1712-{
1713- unsigned long base = mm->brk;
1714- unsigned long ret;
1715-
1716- ret = PAGE_ALIGN(base + brk_rnd());
1717-
1718- if (ret < mm->brk)
1719- return mm->brk;
1720-
1721- return ret;
1722-}
1723diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1724index 19f6cb1..6c78cf2 100644
1725--- a/arch/parisc/include/asm/elf.h
1726+++ b/arch/parisc/include/asm/elf.h
1727@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1728
1729 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1730
1731+#ifdef CONFIG_PAX_ASLR
1732+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1733+
1734+#define PAX_DELTA_MMAP_LEN 16
1735+#define PAX_DELTA_STACK_LEN 16
1736+#endif
1737+
1738 /* This yields a mask that user programs can use to figure out what
1739 instruction set this CPU supports. This could be done in user space,
1740 but it's not easy, and we've already done it here. */
1741diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1742index 22dadeb..f6c2be4 100644
1743--- a/arch/parisc/include/asm/pgtable.h
1744+++ b/arch/parisc/include/asm/pgtable.h
1745@@ -210,6 +210,17 @@ struct vm_area_struct;
1746 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1747 #define PAGE_COPY PAGE_EXECREAD
1748 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1749+
1750+#ifdef CONFIG_PAX_PAGEEXEC
1751+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1752+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1753+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754+#else
1755+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1756+# define PAGE_COPY_NOEXEC PAGE_COPY
1757+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1758+#endif
1759+
1760 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1761 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1762 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1763diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1764index 5e34ccf..672bc9c 100644
1765--- a/arch/parisc/kernel/module.c
1766+++ b/arch/parisc/kernel/module.c
1767@@ -98,16 +98,38 @@
1768
1769 /* three functions to determine where in the module core
1770 * or init pieces the location is */
1771+static inline int in_init_rx(struct module *me, void *loc)
1772+{
1773+ return (loc >= me->module_init_rx &&
1774+ loc < (me->module_init_rx + me->init_size_rx));
1775+}
1776+
1777+static inline int in_init_rw(struct module *me, void *loc)
1778+{
1779+ return (loc >= me->module_init_rw &&
1780+ loc < (me->module_init_rw + me->init_size_rw));
1781+}
1782+
1783 static inline int in_init(struct module *me, void *loc)
1784 {
1785- return (loc >= me->module_init &&
1786- loc <= (me->module_init + me->init_size));
1787+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1788+}
1789+
1790+static inline int in_core_rx(struct module *me, void *loc)
1791+{
1792+ return (loc >= me->module_core_rx &&
1793+ loc < (me->module_core_rx + me->core_size_rx));
1794+}
1795+
1796+static inline int in_core_rw(struct module *me, void *loc)
1797+{
1798+ return (loc >= me->module_core_rw &&
1799+ loc < (me->module_core_rw + me->core_size_rw));
1800 }
1801
1802 static inline int in_core(struct module *me, void *loc)
1803 {
1804- return (loc >= me->module_core &&
1805- loc <= (me->module_core + me->core_size));
1806+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1807 }
1808
1809 static inline int in_local(struct module *me, void *loc)
1810@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1811 }
1812
1813 /* align things a bit */
1814- me->core_size = ALIGN(me->core_size, 16);
1815- me->arch.got_offset = me->core_size;
1816- me->core_size += gots * sizeof(struct got_entry);
1817+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1818+ me->arch.got_offset = me->core_size_rw;
1819+ me->core_size_rw += gots * sizeof(struct got_entry);
1820
1821- me->core_size = ALIGN(me->core_size, 16);
1822- me->arch.fdesc_offset = me->core_size;
1823- me->core_size += fdescs * sizeof(Elf_Fdesc);
1824+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1825+ me->arch.fdesc_offset = me->core_size_rw;
1826+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1827
1828 me->arch.got_max = gots;
1829 me->arch.fdesc_max = fdescs;
1830@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1831
1832 BUG_ON(value == 0);
1833
1834- got = me->module_core + me->arch.got_offset;
1835+ got = me->module_core_rw + me->arch.got_offset;
1836 for (i = 0; got[i].addr; i++)
1837 if (got[i].addr == value)
1838 goto out;
1839@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1840 #ifdef CONFIG_64BIT
1841 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1842 {
1843- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1844+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1845
1846 if (!value) {
1847 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1848@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1849
1850 /* Create new one */
1851 fdesc->addr = value;
1852- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1853+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1854 return (Elf_Addr)fdesc;
1855 }
1856 #endif /* CONFIG_64BIT */
1857@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1858
1859 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1860 end = table + sechdrs[me->arch.unwind_section].sh_size;
1861- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1862+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1863
1864 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1865 me->arch.unwind_section, table, end, gp);
1866diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1867index c9b9322..02d8940 100644
1868--- a/arch/parisc/kernel/sys_parisc.c
1869+++ b/arch/parisc/kernel/sys_parisc.c
1870@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1871 /* At this point: (!vma || addr < vma->vm_end). */
1872 if (TASK_SIZE - len < addr)
1873 return -ENOMEM;
1874- if (!vma || addr + len <= vma->vm_start)
1875+ if (check_heap_stack_gap(vma, addr, len))
1876 return addr;
1877 addr = vma->vm_end;
1878 }
1879@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1880 /* At this point: (!vma || addr < vma->vm_end). */
1881 if (TASK_SIZE - len < addr)
1882 return -ENOMEM;
1883- if (!vma || addr + len <= vma->vm_start)
1884+ if (check_heap_stack_gap(vma, addr, len))
1885 return addr;
1886 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1887 if (addr < vma->vm_end) /* handle wraparound */
1888@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1889 if (flags & MAP_FIXED)
1890 return addr;
1891 if (!addr)
1892- addr = TASK_UNMAPPED_BASE;
1893+ addr = current->mm->mmap_base;
1894
1895 if (filp) {
1896 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1897diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1898index f19e660..414fe24 100644
1899--- a/arch/parisc/kernel/traps.c
1900+++ b/arch/parisc/kernel/traps.c
1901@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1902
1903 down_read(&current->mm->mmap_sem);
1904 vma = find_vma(current->mm,regs->iaoq[0]);
1905- if (vma && (regs->iaoq[0] >= vma->vm_start)
1906- && (vma->vm_flags & VM_EXEC)) {
1907-
1908+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1909 fault_address = regs->iaoq[0];
1910 fault_space = regs->iasq[0];
1911
1912diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1913index 18162ce..94de376 100644
1914--- a/arch/parisc/mm/fault.c
1915+++ b/arch/parisc/mm/fault.c
1916@@ -15,6 +15,7 @@
1917 #include <linux/sched.h>
1918 #include <linux/interrupt.h>
1919 #include <linux/module.h>
1920+#include <linux/unistd.h>
1921
1922 #include <asm/uaccess.h>
1923 #include <asm/traps.h>
1924@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1925 static unsigned long
1926 parisc_acctyp(unsigned long code, unsigned int inst)
1927 {
1928- if (code == 6 || code == 16)
1929+ if (code == 6 || code == 7 || code == 16)
1930 return VM_EXEC;
1931
1932 switch (inst & 0xf0000000) {
1933@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1934 }
1935 #endif
1936
1937+#ifdef CONFIG_PAX_PAGEEXEC
1938+/*
1939+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1940+ *
1941+ * returns 1 when task should be killed
1942+ * 2 when rt_sigreturn trampoline was detected
1943+ * 3 when unpatched PLT trampoline was detected
1944+ */
1945+static int pax_handle_fetch_fault(struct pt_regs *regs)
1946+{
1947+
1948+#ifdef CONFIG_PAX_EMUPLT
1949+ int err;
1950+
1951+ do { /* PaX: unpatched PLT emulation */
1952+ unsigned int bl, depwi;
1953+
1954+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1955+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1956+
1957+ if (err)
1958+ break;
1959+
1960+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1961+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1962+
1963+ err = get_user(ldw, (unsigned int *)addr);
1964+ err |= get_user(bv, (unsigned int *)(addr+4));
1965+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1966+
1967+ if (err)
1968+ break;
1969+
1970+ if (ldw == 0x0E801096U &&
1971+ bv == 0xEAC0C000U &&
1972+ ldw2 == 0x0E881095U)
1973+ {
1974+ unsigned int resolver, map;
1975+
1976+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1977+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1978+ if (err)
1979+ break;
1980+
1981+ regs->gr[20] = instruction_pointer(regs)+8;
1982+ regs->gr[21] = map;
1983+ regs->gr[22] = resolver;
1984+ regs->iaoq[0] = resolver | 3UL;
1985+ regs->iaoq[1] = regs->iaoq[0] + 4;
1986+ return 3;
1987+ }
1988+ }
1989+ } while (0);
1990+#endif
1991+
1992+#ifdef CONFIG_PAX_EMUTRAMP
1993+
1994+#ifndef CONFIG_PAX_EMUSIGRT
1995+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1996+ return 1;
1997+#endif
1998+
1999+ do { /* PaX: rt_sigreturn emulation */
2000+ unsigned int ldi1, ldi2, bel, nop;
2001+
2002+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2003+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2004+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2005+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2006+
2007+ if (err)
2008+ break;
2009+
2010+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2011+ ldi2 == 0x3414015AU &&
2012+ bel == 0xE4008200U &&
2013+ nop == 0x08000240U)
2014+ {
2015+ regs->gr[25] = (ldi1 & 2) >> 1;
2016+ regs->gr[20] = __NR_rt_sigreturn;
2017+ regs->gr[31] = regs->iaoq[1] + 16;
2018+ regs->sr[0] = regs->iasq[1];
2019+ regs->iaoq[0] = 0x100UL;
2020+ regs->iaoq[1] = regs->iaoq[0] + 4;
2021+ regs->iasq[0] = regs->sr[2];
2022+ regs->iasq[1] = regs->sr[2];
2023+ return 2;
2024+ }
2025+ } while (0);
2026+#endif
2027+
2028+ return 1;
2029+}
2030+
2031+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2032+{
2033+ unsigned long i;
2034+
2035+ printk(KERN_ERR "PAX: bytes at PC: ");
2036+ for (i = 0; i < 5; i++) {
2037+ unsigned int c;
2038+ if (get_user(c, (unsigned int *)pc+i))
2039+ printk(KERN_CONT "???????? ");
2040+ else
2041+ printk(KERN_CONT "%08x ", c);
2042+ }
2043+ printk("\n");
2044+}
2045+#endif
2046+
2047 int fixup_exception(struct pt_regs *regs)
2048 {
2049 const struct exception_table_entry *fix;
2050@@ -192,8 +303,33 @@ good_area:
2051
2052 acc_type = parisc_acctyp(code,regs->iir);
2053
2054- if ((vma->vm_flags & acc_type) != acc_type)
2055+ if ((vma->vm_flags & acc_type) != acc_type) {
2056+
2057+#ifdef CONFIG_PAX_PAGEEXEC
2058+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2059+ (address & ~3UL) == instruction_pointer(regs))
2060+ {
2061+ up_read(&mm->mmap_sem);
2062+ switch (pax_handle_fetch_fault(regs)) {
2063+
2064+#ifdef CONFIG_PAX_EMUPLT
2065+ case 3:
2066+ return;
2067+#endif
2068+
2069+#ifdef CONFIG_PAX_EMUTRAMP
2070+ case 2:
2071+ return;
2072+#endif
2073+
2074+ }
2075+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2076+ do_group_exit(SIGKILL);
2077+ }
2078+#endif
2079+
2080 goto bad_area;
2081+ }
2082
2083 /*
2084 * If for any reason at all we couldn't handle the fault, make
2085diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2086index 3bf9cca..e7457d0 100644
2087--- a/arch/powerpc/include/asm/elf.h
2088+++ b/arch/powerpc/include/asm/elf.h
2089@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2090 the loader. We need to make sure that it is out of the way of the program
2091 that it will "exec", and that there is sufficient room for the brk. */
2092
2093-extern unsigned long randomize_et_dyn(unsigned long base);
2094-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2095+#define ELF_ET_DYN_BASE (0x20000000)
2096+
2097+#ifdef CONFIG_PAX_ASLR
2098+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2099+
2100+#ifdef __powerpc64__
2101+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2102+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2103+#else
2104+#define PAX_DELTA_MMAP_LEN 15
2105+#define PAX_DELTA_STACK_LEN 15
2106+#endif
2107+#endif
2108
2109 /*
2110 * Our registers are always unsigned longs, whether we're a 32 bit
2111@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2112 (0x7ff >> (PAGE_SHIFT - 12)) : \
2113 (0x3ffff >> (PAGE_SHIFT - 12)))
2114
2115-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2116-#define arch_randomize_brk arch_randomize_brk
2117-
2118 #endif /* __KERNEL__ */
2119
2120 /*
2121diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2122index bca8fdc..61e9580 100644
2123--- a/arch/powerpc/include/asm/kmap_types.h
2124+++ b/arch/powerpc/include/asm/kmap_types.h
2125@@ -27,6 +27,7 @@ enum km_type {
2126 KM_PPC_SYNC_PAGE,
2127 KM_PPC_SYNC_ICACHE,
2128 KM_KDB,
2129+ KM_CLEARPAGE,
2130 KM_TYPE_NR
2131 };
2132
2133diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2134index d4a7f64..451de1c 100644
2135--- a/arch/powerpc/include/asm/mman.h
2136+++ b/arch/powerpc/include/asm/mman.h
2137@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2138 }
2139 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2140
2141-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2142+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2143 {
2144 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2145 }
2146diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2147index dd9c4fd..a2ced87 100644
2148--- a/arch/powerpc/include/asm/page.h
2149+++ b/arch/powerpc/include/asm/page.h
2150@@ -141,8 +141,9 @@ extern phys_addr_t kernstart_addr;
2151 * and needs to be executable. This means the whole heap ends
2152 * up being executable.
2153 */
2154-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2155- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156+#define VM_DATA_DEFAULT_FLAGS32 \
2157+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2158+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159
2160 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2161 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162@@ -170,6 +171,9 @@ extern phys_addr_t kernstart_addr;
2163 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2164 #endif
2165
2166+#define ktla_ktva(addr) (addr)
2167+#define ktva_ktla(addr) (addr)
2168+
2169 /*
2170 * Use the top bit of the higher-level page table entries to indicate whether
2171 * the entries we point to contain hugepages. This works because we know that
2172diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2173index fb40ede..d3ce956 100644
2174--- a/arch/powerpc/include/asm/page_64.h
2175+++ b/arch/powerpc/include/asm/page_64.h
2176@@ -144,15 +144,18 @@ do { \
2177 * stack by default, so in the absence of a PT_GNU_STACK program header
2178 * we turn execute permission off.
2179 */
2180-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2181- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182+#define VM_STACK_DEFAULT_FLAGS32 \
2183+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2184+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2187 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2188
2189+#ifndef CONFIG_PAX_PAGEEXEC
2190 #define VM_STACK_DEFAULT_FLAGS \
2191 (is_32bit_task() ? \
2192 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2193+#endif
2194
2195 #include <asm-generic/getorder.h>
2196
2197diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2198index 88b0bd9..e32bc67 100644
2199--- a/arch/powerpc/include/asm/pgtable.h
2200+++ b/arch/powerpc/include/asm/pgtable.h
2201@@ -2,6 +2,7 @@
2202 #define _ASM_POWERPC_PGTABLE_H
2203 #ifdef __KERNEL__
2204
2205+#include <linux/const.h>
2206 #ifndef __ASSEMBLY__
2207 #include <asm/processor.h> /* For TASK_SIZE */
2208 #include <asm/mmu.h>
2209diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2210index 4aad413..85d86bf 100644
2211--- a/arch/powerpc/include/asm/pte-hash32.h
2212+++ b/arch/powerpc/include/asm/pte-hash32.h
2213@@ -21,6 +21,7 @@
2214 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2215 #define _PAGE_USER 0x004 /* usermode access allowed */
2216 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2217+#define _PAGE_EXEC _PAGE_GUARDED
2218 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2219 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2220 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2221diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2222index 559da19..7e5835c 100644
2223--- a/arch/powerpc/include/asm/reg.h
2224+++ b/arch/powerpc/include/asm/reg.h
2225@@ -212,6 +212,7 @@
2226 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2227 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2228 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2229+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2230 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2231 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2232 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2233diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2234index e30a13d..2b7d994 100644
2235--- a/arch/powerpc/include/asm/system.h
2236+++ b/arch/powerpc/include/asm/system.h
2237@@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2238 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2239 #endif
2240
2241-extern unsigned long arch_align_stack(unsigned long sp);
2242+#define arch_align_stack(x) ((x) & ~0xfUL)
2243
2244 /* Used in very early kernel initialization. */
2245 extern unsigned long reloc_offset(void);
2246diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2247index bd0fb84..a42a14b 100644
2248--- a/arch/powerpc/include/asm/uaccess.h
2249+++ b/arch/powerpc/include/asm/uaccess.h
2250@@ -13,6 +13,8 @@
2251 #define VERIFY_READ 0
2252 #define VERIFY_WRITE 1
2253
2254+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2255+
2256 /*
2257 * The fs value determines whether argument validity checking should be
2258 * performed or not. If get_fs() == USER_DS, checking is performed, with
2259@@ -327,52 +329,6 @@ do { \
2260 extern unsigned long __copy_tofrom_user(void __user *to,
2261 const void __user *from, unsigned long size);
2262
2263-#ifndef __powerpc64__
2264-
2265-static inline unsigned long copy_from_user(void *to,
2266- const void __user *from, unsigned long n)
2267-{
2268- unsigned long over;
2269-
2270- if (access_ok(VERIFY_READ, from, n))
2271- return __copy_tofrom_user((__force void __user *)to, from, n);
2272- if ((unsigned long)from < TASK_SIZE) {
2273- over = (unsigned long)from + n - TASK_SIZE;
2274- return __copy_tofrom_user((__force void __user *)to, from,
2275- n - over) + over;
2276- }
2277- return n;
2278-}
2279-
2280-static inline unsigned long copy_to_user(void __user *to,
2281- const void *from, unsigned long n)
2282-{
2283- unsigned long over;
2284-
2285- if (access_ok(VERIFY_WRITE, to, n))
2286- return __copy_tofrom_user(to, (__force void __user *)from, n);
2287- if ((unsigned long)to < TASK_SIZE) {
2288- over = (unsigned long)to + n - TASK_SIZE;
2289- return __copy_tofrom_user(to, (__force void __user *)from,
2290- n - over) + over;
2291- }
2292- return n;
2293-}
2294-
2295-#else /* __powerpc64__ */
2296-
2297-#define __copy_in_user(to, from, size) \
2298- __copy_tofrom_user((to), (from), (size))
2299-
2300-extern unsigned long copy_from_user(void *to, const void __user *from,
2301- unsigned long n);
2302-extern unsigned long copy_to_user(void __user *to, const void *from,
2303- unsigned long n);
2304-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2305- unsigned long n);
2306-
2307-#endif /* __powerpc64__ */
2308-
2309 static inline unsigned long __copy_from_user_inatomic(void *to,
2310 const void __user *from, unsigned long n)
2311 {
2312@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2313 if (ret == 0)
2314 return 0;
2315 }
2316+
2317+ if (!__builtin_constant_p(n))
2318+ check_object_size(to, n, false);
2319+
2320 return __copy_tofrom_user((__force void __user *)to, from, n);
2321 }
2322
2323@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2324 if (ret == 0)
2325 return 0;
2326 }
2327+
2328+ if (!__builtin_constant_p(n))
2329+ check_object_size(from, n, true);
2330+
2331 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2332 }
2333
2334@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2335 return __copy_to_user_inatomic(to, from, size);
2336 }
2337
2338+#ifndef __powerpc64__
2339+
2340+static inline unsigned long __must_check copy_from_user(void *to,
2341+ const void __user *from, unsigned long n)
2342+{
2343+ unsigned long over;
2344+
2345+ if ((long)n < 0)
2346+ return n;
2347+
2348+ if (access_ok(VERIFY_READ, from, n)) {
2349+ if (!__builtin_constant_p(n))
2350+ check_object_size(to, n, false);
2351+ return __copy_tofrom_user((__force void __user *)to, from, n);
2352+ }
2353+ if ((unsigned long)from < TASK_SIZE) {
2354+ over = (unsigned long)from + n - TASK_SIZE;
2355+ if (!__builtin_constant_p(n - over))
2356+ check_object_size(to, n - over, false);
2357+ return __copy_tofrom_user((__force void __user *)to, from,
2358+ n - over) + over;
2359+ }
2360+ return n;
2361+}
2362+
2363+static inline unsigned long __must_check copy_to_user(void __user *to,
2364+ const void *from, unsigned long n)
2365+{
2366+ unsigned long over;
2367+
2368+ if ((long)n < 0)
2369+ return n;
2370+
2371+ if (access_ok(VERIFY_WRITE, to, n)) {
2372+ if (!__builtin_constant_p(n))
2373+ check_object_size(from, n, true);
2374+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2375+ }
2376+ if ((unsigned long)to < TASK_SIZE) {
2377+ over = (unsigned long)to + n - TASK_SIZE;
2378+ if (!__builtin_constant_p(n))
2379+ check_object_size(from, n - over, true);
2380+ return __copy_tofrom_user(to, (__force void __user *)from,
2381+ n - over) + over;
2382+ }
2383+ return n;
2384+}
2385+
2386+#else /* __powerpc64__ */
2387+
2388+#define __copy_in_user(to, from, size) \
2389+ __copy_tofrom_user((to), (from), (size))
2390+
2391+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2392+{
2393+ if ((long)n < 0 || n > INT_MAX)
2394+ return n;
2395+
2396+ if (!__builtin_constant_p(n))
2397+ check_object_size(to, n, false);
2398+
2399+ if (likely(access_ok(VERIFY_READ, from, n)))
2400+ n = __copy_from_user(to, from, n);
2401+ else
2402+ memset(to, 0, n);
2403+ return n;
2404+}
2405+
2406+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2407+{
2408+ if ((long)n < 0 || n > INT_MAX)
2409+ return n;
2410+
2411+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2412+ if (!__builtin_constant_p(n))
2413+ check_object_size(from, n, true);
2414+ n = __copy_to_user(to, from, n);
2415+ }
2416+ return n;
2417+}
2418+
2419+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2420+ unsigned long n);
2421+
2422+#endif /* __powerpc64__ */
2423+
2424 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2425
2426 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2427diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2428index 429983c..7af363b 100644
2429--- a/arch/powerpc/kernel/exceptions-64e.S
2430+++ b/arch/powerpc/kernel/exceptions-64e.S
2431@@ -587,6 +587,7 @@ storage_fault_common:
2432 std r14,_DAR(r1)
2433 std r15,_DSISR(r1)
2434 addi r3,r1,STACK_FRAME_OVERHEAD
2435+ bl .save_nvgprs
2436 mr r4,r14
2437 mr r5,r15
2438 ld r14,PACA_EXGEN+EX_R14(r13)
2439@@ -596,8 +597,7 @@ storage_fault_common:
2440 cmpdi r3,0
2441 bne- 1f
2442 b .ret_from_except_lite
2443-1: bl .save_nvgprs
2444- mr r5,r3
2445+1: mr r5,r3
2446 addi r3,r1,STACK_FRAME_OVERHEAD
2447 ld r4,_DAR(r1)
2448 bl .bad_page_fault
2449diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2450index cf9c69b..ebc9640 100644
2451--- a/arch/powerpc/kernel/exceptions-64s.S
2452+++ b/arch/powerpc/kernel/exceptions-64s.S
2453@@ -1004,10 +1004,10 @@ handle_page_fault:
2454 11: ld r4,_DAR(r1)
2455 ld r5,_DSISR(r1)
2456 addi r3,r1,STACK_FRAME_OVERHEAD
2457+ bl .save_nvgprs
2458 bl .do_page_fault
2459 cmpdi r3,0
2460 beq+ 13f
2461- bl .save_nvgprs
2462 mr r5,r3
2463 addi r3,r1,STACK_FRAME_OVERHEAD
2464 lwz r4,_DAR(r1)
2465diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2466index 0b6d796..d760ddb 100644
2467--- a/arch/powerpc/kernel/module_32.c
2468+++ b/arch/powerpc/kernel/module_32.c
2469@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2470 me->arch.core_plt_section = i;
2471 }
2472 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2473- printk("Module doesn't contain .plt or .init.plt sections.\n");
2474+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2475 return -ENOEXEC;
2476 }
2477
2478@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2479
2480 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2481 /* Init, or core PLT? */
2482- if (location >= mod->module_core
2483- && location < mod->module_core + mod->core_size)
2484+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2485+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2486 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2487- else
2488+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2489+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2490 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2491+ else {
2492+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2493+ return ~0UL;
2494+ }
2495
2496 /* Find this entry, or if that fails, the next avail. entry */
2497 while (entry->jump[0]) {
2498diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2499index 6457574..08b28d3 100644
2500--- a/arch/powerpc/kernel/process.c
2501+++ b/arch/powerpc/kernel/process.c
2502@@ -660,8 +660,8 @@ void show_regs(struct pt_regs * regs)
2503 * Lookup NIP late so we have the best change of getting the
2504 * above info out without failing
2505 */
2506- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2507- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2508+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2509+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2510 #endif
2511 show_stack(current, (unsigned long *) regs->gpr[1]);
2512 if (!user_mode(regs))
2513@@ -1165,10 +1165,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2514 newsp = stack[0];
2515 ip = stack[STACK_FRAME_LR_SAVE];
2516 if (!firstframe || ip != lr) {
2517- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2518+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2519 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2520 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2521- printk(" (%pS)",
2522+ printk(" (%pA)",
2523 (void *)current->ret_stack[curr_frame].ret);
2524 curr_frame--;
2525 }
2526@@ -1188,7 +1188,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2527 struct pt_regs *regs = (struct pt_regs *)
2528 (sp + STACK_FRAME_OVERHEAD);
2529 lr = regs->link;
2530- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2531+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2532 regs->trap, (void *)regs->nip, (void *)lr);
2533 firstframe = 1;
2534 }
2535@@ -1263,58 +1263,3 @@ void thread_info_cache_init(void)
2536 }
2537
2538 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2539-
2540-unsigned long arch_align_stack(unsigned long sp)
2541-{
2542- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2543- sp -= get_random_int() & ~PAGE_MASK;
2544- return sp & ~0xf;
2545-}
2546-
2547-static inline unsigned long brk_rnd(void)
2548-{
2549- unsigned long rnd = 0;
2550-
2551- /* 8MB for 32bit, 1GB for 64bit */
2552- if (is_32bit_task())
2553- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2554- else
2555- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2556-
2557- return rnd << PAGE_SHIFT;
2558-}
2559-
2560-unsigned long arch_randomize_brk(struct mm_struct *mm)
2561-{
2562- unsigned long base = mm->brk;
2563- unsigned long ret;
2564-
2565-#ifdef CONFIG_PPC_STD_MMU_64
2566- /*
2567- * If we are using 1TB segments and we are allowed to randomise
2568- * the heap, we can put it above 1TB so it is backed by a 1TB
2569- * segment. Otherwise the heap will be in the bottom 1TB
2570- * which always uses 256MB segments and this may result in a
2571- * performance penalty.
2572- */
2573- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2574- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2575-#endif
2576-
2577- ret = PAGE_ALIGN(base + brk_rnd());
2578-
2579- if (ret < mm->brk)
2580- return mm->brk;
2581-
2582- return ret;
2583-}
2584-
2585-unsigned long randomize_et_dyn(unsigned long base)
2586-{
2587- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2588-
2589- if (ret < base)
2590- return base;
2591-
2592- return ret;
2593-}
2594diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2595index 836a5a1..27289a3 100644
2596--- a/arch/powerpc/kernel/signal_32.c
2597+++ b/arch/powerpc/kernel/signal_32.c
2598@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2599 /* Save user registers on the stack */
2600 frame = &rt_sf->uc.uc_mcontext;
2601 addr = frame;
2602- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2603+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2604 if (save_user_regs(regs, frame, 0, 1))
2605 goto badframe;
2606 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2607diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2608index a50b5ec..547078a 100644
2609--- a/arch/powerpc/kernel/signal_64.c
2610+++ b/arch/powerpc/kernel/signal_64.c
2611@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2612 current->thread.fpscr.val = 0;
2613
2614 /* Set up to return from userspace. */
2615- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2616+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2617 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2618 } else {
2619 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2620diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2621index 5459d14..10f8070 100644
2622--- a/arch/powerpc/kernel/traps.c
2623+++ b/arch/powerpc/kernel/traps.c
2624@@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2625 static inline void pmac_backlight_unblank(void) { }
2626 #endif
2627
2628+extern void gr_handle_kernel_exploit(void);
2629+
2630 int die(const char *str, struct pt_regs *regs, long err)
2631 {
2632 static struct {
2633@@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2634 if (panic_on_oops)
2635 panic("Fatal exception");
2636
2637+ gr_handle_kernel_exploit();
2638+
2639 oops_exit();
2640 do_exit(err);
2641
2642diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2643index 7d14bb6..1305601 100644
2644--- a/arch/powerpc/kernel/vdso.c
2645+++ b/arch/powerpc/kernel/vdso.c
2646@@ -35,6 +35,7 @@
2647 #include <asm/firmware.h>
2648 #include <asm/vdso.h>
2649 #include <asm/vdso_datapage.h>
2650+#include <asm/mman.h>
2651
2652 #include "setup.h"
2653
2654@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2655 vdso_base = VDSO32_MBASE;
2656 #endif
2657
2658- current->mm->context.vdso_base = 0;
2659+ current->mm->context.vdso_base = ~0UL;
2660
2661 /* vDSO has a problem and was disabled, just don't "enable" it for the
2662 * process
2663@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2664 vdso_base = get_unmapped_area(NULL, vdso_base,
2665 (vdso_pages << PAGE_SHIFT) +
2666 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2667- 0, 0);
2668+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2669 if (IS_ERR_VALUE(vdso_base)) {
2670 rc = vdso_base;
2671 goto fail_mmapsem;
2672diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2673index 5eea6f3..5d10396 100644
2674--- a/arch/powerpc/lib/usercopy_64.c
2675+++ b/arch/powerpc/lib/usercopy_64.c
2676@@ -9,22 +9,6 @@
2677 #include <linux/module.h>
2678 #include <asm/uaccess.h>
2679
2680-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2681-{
2682- if (likely(access_ok(VERIFY_READ, from, n)))
2683- n = __copy_from_user(to, from, n);
2684- else
2685- memset(to, 0, n);
2686- return n;
2687-}
2688-
2689-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2690-{
2691- if (likely(access_ok(VERIFY_WRITE, to, n)))
2692- n = __copy_to_user(to, from, n);
2693- return n;
2694-}
2695-
2696 unsigned long copy_in_user(void __user *to, const void __user *from,
2697 unsigned long n)
2698 {
2699@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2700 return n;
2701 }
2702
2703-EXPORT_SYMBOL(copy_from_user);
2704-EXPORT_SYMBOL(copy_to_user);
2705 EXPORT_SYMBOL(copy_in_user);
2706
2707diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2708index 5efe8c9..db9ceef 100644
2709--- a/arch/powerpc/mm/fault.c
2710+++ b/arch/powerpc/mm/fault.c
2711@@ -32,6 +32,10 @@
2712 #include <linux/perf_event.h>
2713 #include <linux/magic.h>
2714 #include <linux/ratelimit.h>
2715+#include <linux/slab.h>
2716+#include <linux/pagemap.h>
2717+#include <linux/compiler.h>
2718+#include <linux/unistd.h>
2719
2720 #include <asm/firmware.h>
2721 #include <asm/page.h>
2722@@ -43,6 +47,7 @@
2723 #include <asm/tlbflush.h>
2724 #include <asm/siginfo.h>
2725 #include <mm/mmu_decl.h>
2726+#include <asm/ptrace.h>
2727
2728 #ifdef CONFIG_KPROBES
2729 static inline int notify_page_fault(struct pt_regs *regs)
2730@@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2731 }
2732 #endif
2733
2734+#ifdef CONFIG_PAX_PAGEEXEC
2735+/*
2736+ * PaX: decide what to do with offenders (regs->nip = fault address)
2737+ *
2738+ * returns 1 when task should be killed
2739+ */
2740+static int pax_handle_fetch_fault(struct pt_regs *regs)
2741+{
2742+ return 1;
2743+}
2744+
2745+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2746+{
2747+ unsigned long i;
2748+
2749+ printk(KERN_ERR "PAX: bytes at PC: ");
2750+ for (i = 0; i < 5; i++) {
2751+ unsigned int c;
2752+ if (get_user(c, (unsigned int __user *)pc+i))
2753+ printk(KERN_CONT "???????? ");
2754+ else
2755+ printk(KERN_CONT "%08x ", c);
2756+ }
2757+ printk("\n");
2758+}
2759+#endif
2760+
2761 /*
2762 * Check whether the instruction at regs->nip is a store using
2763 * an update addressing form which will update r1.
2764@@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2765 * indicate errors in DSISR but can validly be set in SRR1.
2766 */
2767 if (trap == 0x400)
2768- error_code &= 0x48200000;
2769+ error_code &= 0x58200000;
2770 else
2771 is_write = error_code & DSISR_ISSTORE;
2772 #else
2773@@ -259,7 +291,7 @@ good_area:
2774 * "undefined". Of those that can be set, this is the only
2775 * one which seems bad.
2776 */
2777- if (error_code & 0x10000000)
2778+ if (error_code & DSISR_GUARDED)
2779 /* Guarded storage error. */
2780 goto bad_area;
2781 #endif /* CONFIG_8xx */
2782@@ -274,7 +306,7 @@ good_area:
2783 * processors use the same I/D cache coherency mechanism
2784 * as embedded.
2785 */
2786- if (error_code & DSISR_PROTFAULT)
2787+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2788 goto bad_area;
2789 #endif /* CONFIG_PPC_STD_MMU */
2790
2791@@ -343,6 +375,23 @@ bad_area:
2792 bad_area_nosemaphore:
2793 /* User mode accesses cause a SIGSEGV */
2794 if (user_mode(regs)) {
2795+
2796+#ifdef CONFIG_PAX_PAGEEXEC
2797+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2798+#ifdef CONFIG_PPC_STD_MMU
2799+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2800+#else
2801+ if (is_exec && regs->nip == address) {
2802+#endif
2803+ switch (pax_handle_fetch_fault(regs)) {
2804+ }
2805+
2806+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2807+ do_group_exit(SIGKILL);
2808+ }
2809+ }
2810+#endif
2811+
2812 _exception(SIGSEGV, regs, code, address);
2813 return 0;
2814 }
2815diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2816index 5a783d8..c23e14b 100644
2817--- a/arch/powerpc/mm/mmap_64.c
2818+++ b/arch/powerpc/mm/mmap_64.c
2819@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2820 */
2821 if (mmap_is_legacy()) {
2822 mm->mmap_base = TASK_UNMAPPED_BASE;
2823+
2824+#ifdef CONFIG_PAX_RANDMMAP
2825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2826+ mm->mmap_base += mm->delta_mmap;
2827+#endif
2828+
2829 mm->get_unmapped_area = arch_get_unmapped_area;
2830 mm->unmap_area = arch_unmap_area;
2831 } else {
2832 mm->mmap_base = mmap_base();
2833+
2834+#ifdef CONFIG_PAX_RANDMMAP
2835+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2836+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2837+#endif
2838+
2839 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2840 mm->unmap_area = arch_unmap_area_topdown;
2841 }
2842diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2843index 73709f7..6b90313 100644
2844--- a/arch/powerpc/mm/slice.c
2845+++ b/arch/powerpc/mm/slice.c
2846@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2847 if ((mm->task_size - len) < addr)
2848 return 0;
2849 vma = find_vma(mm, addr);
2850- return (!vma || (addr + len) <= vma->vm_start);
2851+ return check_heap_stack_gap(vma, addr, len);
2852 }
2853
2854 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2855@@ -256,7 +256,7 @@ full_search:
2856 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2857 continue;
2858 }
2859- if (!vma || addr + len <= vma->vm_start) {
2860+ if (check_heap_stack_gap(vma, addr, len)) {
2861 /*
2862 * Remember the place where we stopped the search:
2863 */
2864@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2865 }
2866 }
2867
2868- addr = mm->mmap_base;
2869- while (addr > len) {
2870+ if (mm->mmap_base < len)
2871+ addr = -ENOMEM;
2872+ else
2873+ addr = mm->mmap_base - len;
2874+
2875+ while (!IS_ERR_VALUE(addr)) {
2876 /* Go down by chunk size */
2877- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2878+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2879
2880 /* Check for hit with different page size */
2881 mask = slice_range_to_mask(addr, len);
2882@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2883 * return with success:
2884 */
2885 vma = find_vma(mm, addr);
2886- if (!vma || (addr + len) <= vma->vm_start) {
2887+ if (check_heap_stack_gap(vma, addr, len)) {
2888 /* remember the address as a hint for next time */
2889 if (use_cache)
2890 mm->free_area_cache = addr;
2891@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2892 mm->cached_hole_size = vma->vm_start - addr;
2893
2894 /* try just below the current vma->vm_start */
2895- addr = vma->vm_start;
2896+ addr = skip_heap_stack_gap(vma, len);
2897 }
2898
2899 /*
2900@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2901 if (fixed && addr > (mm->task_size - len))
2902 return -EINVAL;
2903
2904+#ifdef CONFIG_PAX_RANDMMAP
2905+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2906+ addr = 0;
2907+#endif
2908+
2909 /* If hint, make sure it matches our alignment restrictions */
2910 if (!fixed && addr) {
2911 addr = _ALIGN_UP(addr, 1ul << pshift);
2912diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2913index 547f1a6..3fff354 100644
2914--- a/arch/s390/include/asm/elf.h
2915+++ b/arch/s390/include/asm/elf.h
2916@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2917 the loader. We need to make sure that it is out of the way of the program
2918 that it will "exec", and that there is sufficient room for the brk. */
2919
2920-extern unsigned long randomize_et_dyn(unsigned long base);
2921-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2922+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2923+
2924+#ifdef CONFIG_PAX_ASLR
2925+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2926+
2927+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2928+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2929+#endif
2930
2931 /* This yields a mask that user programs can use to figure out what
2932 instruction set this CPU supports. */
2933@@ -211,7 +217,4 @@ struct linux_binprm;
2934 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2935 int arch_setup_additional_pages(struct linux_binprm *, int);
2936
2937-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2938-#define arch_randomize_brk arch_randomize_brk
2939-
2940 #endif
2941diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2942index ef573c1..75a1ce6 100644
2943--- a/arch/s390/include/asm/system.h
2944+++ b/arch/s390/include/asm/system.h
2945@@ -262,7 +262,7 @@ extern void (*_machine_restart)(char *command);
2946 extern void (*_machine_halt)(void);
2947 extern void (*_machine_power_off)(void);
2948
2949-extern unsigned long arch_align_stack(unsigned long sp);
2950+#define arch_align_stack(x) ((x) & ~0xfUL)
2951
2952 static inline int tprot(unsigned long addr)
2953 {
2954diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2955index 2b23885..e136e31 100644
2956--- a/arch/s390/include/asm/uaccess.h
2957+++ b/arch/s390/include/asm/uaccess.h
2958@@ -235,6 +235,10 @@ static inline unsigned long __must_check
2959 copy_to_user(void __user *to, const void *from, unsigned long n)
2960 {
2961 might_fault();
2962+
2963+ if ((long)n < 0)
2964+ return n;
2965+
2966 if (access_ok(VERIFY_WRITE, to, n))
2967 n = __copy_to_user(to, from, n);
2968 return n;
2969@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2970 static inline unsigned long __must_check
2971 __copy_from_user(void *to, const void __user *from, unsigned long n)
2972 {
2973+ if ((long)n < 0)
2974+ return n;
2975+
2976 if (__builtin_constant_p(n) && (n <= 256))
2977 return uaccess.copy_from_user_small(n, from, to);
2978 else
2979@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2980 unsigned int sz = __compiletime_object_size(to);
2981
2982 might_fault();
2983+
2984+ if ((long)n < 0)
2985+ return n;
2986+
2987 if (unlikely(sz != -1 && sz < n)) {
2988 copy_from_user_overflow();
2989 return n;
2990diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2991index dfcb343..eda788a 100644
2992--- a/arch/s390/kernel/module.c
2993+++ b/arch/s390/kernel/module.c
2994@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2995
2996 /* Increase core size by size of got & plt and set start
2997 offsets for got and plt. */
2998- me->core_size = ALIGN(me->core_size, 4);
2999- me->arch.got_offset = me->core_size;
3000- me->core_size += me->arch.got_size;
3001- me->arch.plt_offset = me->core_size;
3002- me->core_size += me->arch.plt_size;
3003+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3004+ me->arch.got_offset = me->core_size_rw;
3005+ me->core_size_rw += me->arch.got_size;
3006+ me->arch.plt_offset = me->core_size_rx;
3007+ me->core_size_rx += me->arch.plt_size;
3008 return 0;
3009 }
3010
3011@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3012 if (info->got_initialized == 0) {
3013 Elf_Addr *gotent;
3014
3015- gotent = me->module_core + me->arch.got_offset +
3016+ gotent = me->module_core_rw + me->arch.got_offset +
3017 info->got_offset;
3018 *gotent = val;
3019 info->got_initialized = 1;
3020@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3021 else if (r_type == R_390_GOTENT ||
3022 r_type == R_390_GOTPLTENT)
3023 *(unsigned int *) loc =
3024- (val + (Elf_Addr) me->module_core - loc) >> 1;
3025+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3026 else if (r_type == R_390_GOT64 ||
3027 r_type == R_390_GOTPLT64)
3028 *(unsigned long *) loc = val;
3029@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3030 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3031 if (info->plt_initialized == 0) {
3032 unsigned int *ip;
3033- ip = me->module_core + me->arch.plt_offset +
3034+ ip = me->module_core_rx + me->arch.plt_offset +
3035 info->plt_offset;
3036 #ifndef CONFIG_64BIT
3037 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3038@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3039 val - loc + 0xffffUL < 0x1ffffeUL) ||
3040 (r_type == R_390_PLT32DBL &&
3041 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3042- val = (Elf_Addr) me->module_core +
3043+ val = (Elf_Addr) me->module_core_rx +
3044 me->arch.plt_offset +
3045 info->plt_offset;
3046 val += rela->r_addend - loc;
3047@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3048 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3049 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3050 val = val + rela->r_addend -
3051- ((Elf_Addr) me->module_core + me->arch.got_offset);
3052+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3053 if (r_type == R_390_GOTOFF16)
3054 *(unsigned short *) loc = val;
3055 else if (r_type == R_390_GOTOFF32)
3056@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3057 break;
3058 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3059 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3060- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3061+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3062 rela->r_addend - loc;
3063 if (r_type == R_390_GOTPC)
3064 *(unsigned int *) loc = val;
3065diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3066index 9451b21..ed8956f 100644
3067--- a/arch/s390/kernel/process.c
3068+++ b/arch/s390/kernel/process.c
3069@@ -321,39 +321,3 @@ unsigned long get_wchan(struct task_struct *p)
3070 }
3071 return 0;
3072 }
3073-
3074-unsigned long arch_align_stack(unsigned long sp)
3075-{
3076- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3077- sp -= get_random_int() & ~PAGE_MASK;
3078- return sp & ~0xf;
3079-}
3080-
3081-static inline unsigned long brk_rnd(void)
3082-{
3083- /* 8MB for 32bit, 1GB for 64bit */
3084- if (is_32bit_task())
3085- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3086- else
3087- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3088-}
3089-
3090-unsigned long arch_randomize_brk(struct mm_struct *mm)
3091-{
3092- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3093-
3094- if (ret < mm->brk)
3095- return mm->brk;
3096- return ret;
3097-}
3098-
3099-unsigned long randomize_et_dyn(unsigned long base)
3100-{
3101- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3102-
3103- if (!(current->flags & PF_RANDOMIZE))
3104- return base;
3105- if (ret < base)
3106- return base;
3107- return ret;
3108-}
3109diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3110index f09c748..cf9ec1d 100644
3111--- a/arch/s390/mm/mmap.c
3112+++ b/arch/s390/mm/mmap.c
3113@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3114 */
3115 if (mmap_is_legacy()) {
3116 mm->mmap_base = TASK_UNMAPPED_BASE;
3117+
3118+#ifdef CONFIG_PAX_RANDMMAP
3119+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3120+ mm->mmap_base += mm->delta_mmap;
3121+#endif
3122+
3123 mm->get_unmapped_area = arch_get_unmapped_area;
3124 mm->unmap_area = arch_unmap_area;
3125 } else {
3126 mm->mmap_base = mmap_base();
3127+
3128+#ifdef CONFIG_PAX_RANDMMAP
3129+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3130+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3131+#endif
3132+
3133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3134 mm->unmap_area = arch_unmap_area_topdown;
3135 }
3136@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3137 */
3138 if (mmap_is_legacy()) {
3139 mm->mmap_base = TASK_UNMAPPED_BASE;
3140+
3141+#ifdef CONFIG_PAX_RANDMMAP
3142+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3143+ mm->mmap_base += mm->delta_mmap;
3144+#endif
3145+
3146 mm->get_unmapped_area = s390_get_unmapped_area;
3147 mm->unmap_area = arch_unmap_area;
3148 } else {
3149 mm->mmap_base = mmap_base();
3150+
3151+#ifdef CONFIG_PAX_RANDMMAP
3152+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3153+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3154+#endif
3155+
3156 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3157 mm->unmap_area = arch_unmap_area_topdown;
3158 }
3159diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3160index 589d5c7..669e274 100644
3161--- a/arch/score/include/asm/system.h
3162+++ b/arch/score/include/asm/system.h
3163@@ -17,7 +17,7 @@ do { \
3164 #define finish_arch_switch(prev) do {} while (0)
3165
3166 typedef void (*vi_handler_t)(void);
3167-extern unsigned long arch_align_stack(unsigned long sp);
3168+#define arch_align_stack(x) (x)
3169
3170 #define mb() barrier()
3171 #define rmb() barrier()
3172diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3173index 25d0803..d6c8e36 100644
3174--- a/arch/score/kernel/process.c
3175+++ b/arch/score/kernel/process.c
3176@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3177
3178 return task_pt_regs(task)->cp0_epc;
3179 }
3180-
3181-unsigned long arch_align_stack(unsigned long sp)
3182-{
3183- return sp;
3184-}
3185diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3186index afeb710..d1d1289 100644
3187--- a/arch/sh/mm/mmap.c
3188+++ b/arch/sh/mm/mmap.c
3189@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3190 addr = PAGE_ALIGN(addr);
3191
3192 vma = find_vma(mm, addr);
3193- if (TASK_SIZE - len >= addr &&
3194- (!vma || addr + len <= vma->vm_start))
3195+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3196 return addr;
3197 }
3198
3199@@ -106,7 +105,7 @@ full_search:
3200 }
3201 return -ENOMEM;
3202 }
3203- if (likely(!vma || addr + len <= vma->vm_start)) {
3204+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3205 /*
3206 * Remember the place where we stopped the search:
3207 */
3208@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3209 addr = PAGE_ALIGN(addr);
3210
3211 vma = find_vma(mm, addr);
3212- if (TASK_SIZE - len >= addr &&
3213- (!vma || addr + len <= vma->vm_start))
3214+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3215 return addr;
3216 }
3217
3218@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 /* make sure it can fit in the remaining address space */
3220 if (likely(addr > len)) {
3221 vma = find_vma(mm, addr-len);
3222- if (!vma || addr <= vma->vm_start) {
3223+ if (check_heap_stack_gap(vma, addr - len, len)) {
3224 /* remember the address as a hint for next time */
3225 return (mm->free_area_cache = addr-len);
3226 }
3227@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3228 if (unlikely(mm->mmap_base < len))
3229 goto bottomup;
3230
3231- addr = mm->mmap_base-len;
3232- if (do_colour_align)
3233- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3234+ addr = mm->mmap_base - len;
3235
3236 do {
3237+ if (do_colour_align)
3238+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3239 /*
3240 * Lookup failure means no vma is above this address,
3241 * else if new region fits below vma->vm_start,
3242 * return with success:
3243 */
3244 vma = find_vma(mm, addr);
3245- if (likely(!vma || addr+len <= vma->vm_start)) {
3246+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3247 /* remember the address as a hint for next time */
3248 return (mm->free_area_cache = addr);
3249 }
3250@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3251 mm->cached_hole_size = vma->vm_start - addr;
3252
3253 /* try just below the current vma->vm_start */
3254- addr = vma->vm_start-len;
3255- if (do_colour_align)
3256- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3257- } while (likely(len < vma->vm_start));
3258+ addr = skip_heap_stack_gap(vma, len);
3259+ } while (!IS_ERR_VALUE(addr));
3260
3261 bottomup:
3262 /*
3263diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3264index ad1fb5d..fc5315b 100644
3265--- a/arch/sparc/Makefile
3266+++ b/arch/sparc/Makefile
3267@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3268 # Export what is needed by arch/sparc/boot/Makefile
3269 export VMLINUX_INIT VMLINUX_MAIN
3270 VMLINUX_INIT := $(head-y) $(init-y)
3271-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3272+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3273 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3274 VMLINUX_MAIN += $(drivers-y) $(net-y)
3275
3276diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3277index 9f421df..b81fc12 100644
3278--- a/arch/sparc/include/asm/atomic_64.h
3279+++ b/arch/sparc/include/asm/atomic_64.h
3280@@ -14,18 +14,40 @@
3281 #define ATOMIC64_INIT(i) { (i) }
3282
3283 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3284+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3285+{
3286+ return v->counter;
3287+}
3288 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3289+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3290+{
3291+ return v->counter;
3292+}
3293
3294 #define atomic_set(v, i) (((v)->counter) = i)
3295+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3296+{
3297+ v->counter = i;
3298+}
3299 #define atomic64_set(v, i) (((v)->counter) = i)
3300+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3301+{
3302+ v->counter = i;
3303+}
3304
3305 extern void atomic_add(int, atomic_t *);
3306+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3307 extern void atomic64_add(long, atomic64_t *);
3308+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3309 extern void atomic_sub(int, atomic_t *);
3310+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3311 extern void atomic64_sub(long, atomic64_t *);
3312+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3313
3314 extern int atomic_add_ret(int, atomic_t *);
3315+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3316 extern long atomic64_add_ret(long, atomic64_t *);
3317+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3318 extern int atomic_sub_ret(int, atomic_t *);
3319 extern long atomic64_sub_ret(long, atomic64_t *);
3320
3321@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3322 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3323
3324 #define atomic_inc_return(v) atomic_add_ret(1, v)
3325+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3326+{
3327+ return atomic_add_ret_unchecked(1, v);
3328+}
3329 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3330+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3331+{
3332+ return atomic64_add_ret_unchecked(1, v);
3333+}
3334
3335 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3336 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3337
3338 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3339+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3340+{
3341+ return atomic_add_ret_unchecked(i, v);
3342+}
3343 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3344+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3345+{
3346+ return atomic64_add_ret_unchecked(i, v);
3347+}
3348
3349 /*
3350 * atomic_inc_and_test - increment and test
3351@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3352 * other cases.
3353 */
3354 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3355+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3356+{
3357+ return atomic_inc_return_unchecked(v) == 0;
3358+}
3359 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3360
3361 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3362@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3363 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3364
3365 #define atomic_inc(v) atomic_add(1, v)
3366+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3367+{
3368+ atomic_add_unchecked(1, v);
3369+}
3370 #define atomic64_inc(v) atomic64_add(1, v)
3371+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3372+{
3373+ atomic64_add_unchecked(1, v);
3374+}
3375
3376 #define atomic_dec(v) atomic_sub(1, v)
3377+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3378+{
3379+ atomic_sub_unchecked(1, v);
3380+}
3381 #define atomic64_dec(v) atomic64_sub(1, v)
3382+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3383+{
3384+ atomic64_sub_unchecked(1, v);
3385+}
3386
3387 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3388 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3389
3390 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3391+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3392+{
3393+ return cmpxchg(&v->counter, old, new);
3394+}
3395 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3396+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3397+{
3398+ return xchg(&v->counter, new);
3399+}
3400
3401 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3402 {
3403- int c, old;
3404+ int c, old, new;
3405 c = atomic_read(v);
3406 for (;;) {
3407- if (unlikely(c == (u)))
3408+ if (unlikely(c == u))
3409 break;
3410- old = atomic_cmpxchg((v), c, c + (a));
3411+
3412+ asm volatile("addcc %2, %0, %0\n"
3413+
3414+#ifdef CONFIG_PAX_REFCOUNT
3415+ "tvs %%icc, 6\n"
3416+#endif
3417+
3418+ : "=r" (new)
3419+ : "0" (c), "ir" (a)
3420+ : "cc");
3421+
3422+ old = atomic_cmpxchg(v, c, new);
3423 if (likely(old == c))
3424 break;
3425 c = old;
3426@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3427 #define atomic64_cmpxchg(v, o, n) \
3428 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3429 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3430+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3431+{
3432+ return xchg(&v->counter, new);
3433+}
3434
3435 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3436 {
3437- long c, old;
3438+ long c, old, new;
3439 c = atomic64_read(v);
3440 for (;;) {
3441- if (unlikely(c == (u)))
3442+ if (unlikely(c == u))
3443 break;
3444- old = atomic64_cmpxchg((v), c, c + (a));
3445+
3446+ asm volatile("addcc %2, %0, %0\n"
3447+
3448+#ifdef CONFIG_PAX_REFCOUNT
3449+ "tvs %%xcc, 6\n"
3450+#endif
3451+
3452+ : "=r" (new)
3453+ : "0" (c), "ir" (a)
3454+ : "cc");
3455+
3456+ old = atomic64_cmpxchg(v, c, new);
3457 if (likely(old == c))
3458 break;
3459 c = old;
3460 }
3461- return c != (u);
3462+ return c != u;
3463 }
3464
3465 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3466diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3467index 69358b5..17b4745 100644
3468--- a/arch/sparc/include/asm/cache.h
3469+++ b/arch/sparc/include/asm/cache.h
3470@@ -10,7 +10,7 @@
3471 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3472
3473 #define L1_CACHE_SHIFT 5
3474-#define L1_CACHE_BYTES 32
3475+#define L1_CACHE_BYTES 32UL
3476
3477 #ifdef CONFIG_SPARC32
3478 #define SMP_CACHE_BYTES_SHIFT 5
3479diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3480index 4269ca6..e3da77f 100644
3481--- a/arch/sparc/include/asm/elf_32.h
3482+++ b/arch/sparc/include/asm/elf_32.h
3483@@ -114,6 +114,13 @@ typedef struct {
3484
3485 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3486
3487+#ifdef CONFIG_PAX_ASLR
3488+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3489+
3490+#define PAX_DELTA_MMAP_LEN 16
3491+#define PAX_DELTA_STACK_LEN 16
3492+#endif
3493+
3494 /* This yields a mask that user programs can use to figure out what
3495 instruction set this cpu supports. This can NOT be done in userspace
3496 on Sparc. */
3497diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3498index 7df8b7f..4946269 100644
3499--- a/arch/sparc/include/asm/elf_64.h
3500+++ b/arch/sparc/include/asm/elf_64.h
3501@@ -180,6 +180,13 @@ typedef struct {
3502 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3503 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3504
3505+#ifdef CONFIG_PAX_ASLR
3506+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3507+
3508+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3509+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3510+#endif
3511+
3512 extern unsigned long sparc64_elf_hwcap;
3513 #define ELF_HWCAP sparc64_elf_hwcap
3514
3515diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3516index a790cc6..091ed94 100644
3517--- a/arch/sparc/include/asm/pgtable_32.h
3518+++ b/arch/sparc/include/asm/pgtable_32.h
3519@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3520 BTFIXUPDEF_INT(page_none)
3521 BTFIXUPDEF_INT(page_copy)
3522 BTFIXUPDEF_INT(page_readonly)
3523+
3524+#ifdef CONFIG_PAX_PAGEEXEC
3525+BTFIXUPDEF_INT(page_shared_noexec)
3526+BTFIXUPDEF_INT(page_copy_noexec)
3527+BTFIXUPDEF_INT(page_readonly_noexec)
3528+#endif
3529+
3530 BTFIXUPDEF_INT(page_kernel)
3531
3532 #define PMD_SHIFT SUN4C_PMD_SHIFT
3533@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3534 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3535 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3536
3537+#ifdef CONFIG_PAX_PAGEEXEC
3538+extern pgprot_t PAGE_SHARED_NOEXEC;
3539+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3540+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3541+#else
3542+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3543+# define PAGE_COPY_NOEXEC PAGE_COPY
3544+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3545+#endif
3546+
3547 extern unsigned long page_kernel;
3548
3549 #ifdef MODULE
3550diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3551index f6ae2b2..b03ffc7 100644
3552--- a/arch/sparc/include/asm/pgtsrmmu.h
3553+++ b/arch/sparc/include/asm/pgtsrmmu.h
3554@@ -115,6 +115,13 @@
3555 SRMMU_EXEC | SRMMU_REF)
3556 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3557 SRMMU_EXEC | SRMMU_REF)
3558+
3559+#ifdef CONFIG_PAX_PAGEEXEC
3560+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3561+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3562+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3563+#endif
3564+
3565 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3566 SRMMU_DIRTY | SRMMU_REF)
3567
3568diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3569index 9689176..63c18ea 100644
3570--- a/arch/sparc/include/asm/spinlock_64.h
3571+++ b/arch/sparc/include/asm/spinlock_64.h
3572@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3573
3574 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3575
3576-static void inline arch_read_lock(arch_rwlock_t *lock)
3577+static inline void arch_read_lock(arch_rwlock_t *lock)
3578 {
3579 unsigned long tmp1, tmp2;
3580
3581 __asm__ __volatile__ (
3582 "1: ldsw [%2], %0\n"
3583 " brlz,pn %0, 2f\n"
3584-"4: add %0, 1, %1\n"
3585+"4: addcc %0, 1, %1\n"
3586+
3587+#ifdef CONFIG_PAX_REFCOUNT
3588+" tvs %%icc, 6\n"
3589+#endif
3590+
3591 " cas [%2], %0, %1\n"
3592 " cmp %0, %1\n"
3593 " bne,pn %%icc, 1b\n"
3594@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3595 " .previous"
3596 : "=&r" (tmp1), "=&r" (tmp2)
3597 : "r" (lock)
3598- : "memory");
3599+ : "memory", "cc");
3600 }
3601
3602-static int inline arch_read_trylock(arch_rwlock_t *lock)
3603+static inline int arch_read_trylock(arch_rwlock_t *lock)
3604 {
3605 int tmp1, tmp2;
3606
3607@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3608 "1: ldsw [%2], %0\n"
3609 " brlz,a,pn %0, 2f\n"
3610 " mov 0, %0\n"
3611-" add %0, 1, %1\n"
3612+" addcc %0, 1, %1\n"
3613+
3614+#ifdef CONFIG_PAX_REFCOUNT
3615+" tvs %%icc, 6\n"
3616+#endif
3617+
3618 " cas [%2], %0, %1\n"
3619 " cmp %0, %1\n"
3620 " bne,pn %%icc, 1b\n"
3621@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3622 return tmp1;
3623 }
3624
3625-static void inline arch_read_unlock(arch_rwlock_t *lock)
3626+static inline void arch_read_unlock(arch_rwlock_t *lock)
3627 {
3628 unsigned long tmp1, tmp2;
3629
3630 __asm__ __volatile__(
3631 "1: lduw [%2], %0\n"
3632-" sub %0, 1, %1\n"
3633+" subcc %0, 1, %1\n"
3634+
3635+#ifdef CONFIG_PAX_REFCOUNT
3636+" tvs %%icc, 6\n"
3637+#endif
3638+
3639 " cas [%2], %0, %1\n"
3640 " cmp %0, %1\n"
3641 " bne,pn %%xcc, 1b\n"
3642@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3643 : "memory");
3644 }
3645
3646-static void inline arch_write_lock(arch_rwlock_t *lock)
3647+static inline void arch_write_lock(arch_rwlock_t *lock)
3648 {
3649 unsigned long mask, tmp1, tmp2;
3650
3651@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3652 : "memory");
3653 }
3654
3655-static void inline arch_write_unlock(arch_rwlock_t *lock)
3656+static inline void arch_write_unlock(arch_rwlock_t *lock)
3657 {
3658 __asm__ __volatile__(
3659 " stw %%g0, [%0]"
3660@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3661 : "memory");
3662 }
3663
3664-static int inline arch_write_trylock(arch_rwlock_t *lock)
3665+static inline int arch_write_trylock(arch_rwlock_t *lock)
3666 {
3667 unsigned long mask, tmp1, tmp2, result;
3668
3669diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3670index fa57532..e1a4c53 100644
3671--- a/arch/sparc/include/asm/thread_info_32.h
3672+++ b/arch/sparc/include/asm/thread_info_32.h
3673@@ -50,6 +50,8 @@ struct thread_info {
3674 unsigned long w_saved;
3675
3676 struct restart_block restart_block;
3677+
3678+ unsigned long lowest_stack;
3679 };
3680
3681 /*
3682diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3683index 60d86be..952dea1 100644
3684--- a/arch/sparc/include/asm/thread_info_64.h
3685+++ b/arch/sparc/include/asm/thread_info_64.h
3686@@ -63,6 +63,8 @@ struct thread_info {
3687 struct pt_regs *kern_una_regs;
3688 unsigned int kern_una_insn;
3689
3690+ unsigned long lowest_stack;
3691+
3692 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3693 };
3694
3695diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3696index e88fbe5..96b0ce5 100644
3697--- a/arch/sparc/include/asm/uaccess.h
3698+++ b/arch/sparc/include/asm/uaccess.h
3699@@ -1,5 +1,13 @@
3700 #ifndef ___ASM_SPARC_UACCESS_H
3701 #define ___ASM_SPARC_UACCESS_H
3702+
3703+#ifdef __KERNEL__
3704+#ifndef __ASSEMBLY__
3705+#include <linux/types.h>
3706+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3707+#endif
3708+#endif
3709+
3710 #if defined(__sparc__) && defined(__arch64__)
3711 #include <asm/uaccess_64.h>
3712 #else
3713diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3714index 8303ac4..07f333d 100644
3715--- a/arch/sparc/include/asm/uaccess_32.h
3716+++ b/arch/sparc/include/asm/uaccess_32.h
3717@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3718
3719 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3720 {
3721- if (n && __access_ok((unsigned long) to, n))
3722+ if ((long)n < 0)
3723+ return n;
3724+
3725+ if (n && __access_ok((unsigned long) to, n)) {
3726+ if (!__builtin_constant_p(n))
3727+ check_object_size(from, n, true);
3728 return __copy_user(to, (__force void __user *) from, n);
3729- else
3730+ } else
3731 return n;
3732 }
3733
3734 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3735 {
3736+ if ((long)n < 0)
3737+ return n;
3738+
3739+ if (!__builtin_constant_p(n))
3740+ check_object_size(from, n, true);
3741+
3742 return __copy_user(to, (__force void __user *) from, n);
3743 }
3744
3745 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3746 {
3747- if (n && __access_ok((unsigned long) from, n))
3748+ if ((long)n < 0)
3749+ return n;
3750+
3751+ if (n && __access_ok((unsigned long) from, n)) {
3752+ if (!__builtin_constant_p(n))
3753+ check_object_size(to, n, false);
3754 return __copy_user((__force void __user *) to, from, n);
3755- else
3756+ } else
3757 return n;
3758 }
3759
3760 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3761 {
3762+ if ((long)n < 0)
3763+ return n;
3764+
3765 return __copy_user((__force void __user *) to, from, n);
3766 }
3767
3768diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3769index 3e1449f..5293a0e 100644
3770--- a/arch/sparc/include/asm/uaccess_64.h
3771+++ b/arch/sparc/include/asm/uaccess_64.h
3772@@ -10,6 +10,7 @@
3773 #include <linux/compiler.h>
3774 #include <linux/string.h>
3775 #include <linux/thread_info.h>
3776+#include <linux/kernel.h>
3777 #include <asm/asi.h>
3778 #include <asm/system.h>
3779 #include <asm/spitfire.h>
3780@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3781 static inline unsigned long __must_check
3782 copy_from_user(void *to, const void __user *from, unsigned long size)
3783 {
3784- unsigned long ret = ___copy_from_user(to, from, size);
3785+ unsigned long ret;
3786
3787+ if ((long)size < 0 || size > INT_MAX)
3788+ return size;
3789+
3790+ if (!__builtin_constant_p(size))
3791+ check_object_size(to, size, false);
3792+
3793+ ret = ___copy_from_user(to, from, size);
3794 if (unlikely(ret))
3795 ret = copy_from_user_fixup(to, from, size);
3796
3797@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3798 static inline unsigned long __must_check
3799 copy_to_user(void __user *to, const void *from, unsigned long size)
3800 {
3801- unsigned long ret = ___copy_to_user(to, from, size);
3802+ unsigned long ret;
3803
3804+ if ((long)size < 0 || size > INT_MAX)
3805+ return size;
3806+
3807+ if (!__builtin_constant_p(size))
3808+ check_object_size(from, size, true);
3809+
3810+ ret = ___copy_to_user(to, from, size);
3811 if (unlikely(ret))
3812 ret = copy_to_user_fixup(to, from, size);
3813 return ret;
3814diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3815index cb85458..e063f17 100644
3816--- a/arch/sparc/kernel/Makefile
3817+++ b/arch/sparc/kernel/Makefile
3818@@ -3,7 +3,7 @@
3819 #
3820
3821 asflags-y := -ansi
3822-ccflags-y := -Werror
3823+#ccflags-y := -Werror
3824
3825 extra-y := head_$(BITS).o
3826 extra-y += init_task.o
3827diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3828index f793742..4d880af 100644
3829--- a/arch/sparc/kernel/process_32.c
3830+++ b/arch/sparc/kernel/process_32.c
3831@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3832 rw->ins[4], rw->ins[5],
3833 rw->ins[6],
3834 rw->ins[7]);
3835- printk("%pS\n", (void *) rw->ins[7]);
3836+ printk("%pA\n", (void *) rw->ins[7]);
3837 rw = (struct reg_window32 *) rw->ins[6];
3838 }
3839 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3840@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3841
3842 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3843 r->psr, r->pc, r->npc, r->y, print_tainted());
3844- printk("PC: <%pS>\n", (void *) r->pc);
3845+ printk("PC: <%pA>\n", (void *) r->pc);
3846 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3847 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3848 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3849 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3850 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3851 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3852- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3853+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3854
3855 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3856 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3857@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3858 rw = (struct reg_window32 *) fp;
3859 pc = rw->ins[7];
3860 printk("[%08lx : ", pc);
3861- printk("%pS ] ", (void *) pc);
3862+ printk("%pA ] ", (void *) pc);
3863 fp = rw->ins[6];
3864 } while (++count < 16);
3865 printk("\n");
3866diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3867index 3739a06..48b2ff0 100644
3868--- a/arch/sparc/kernel/process_64.c
3869+++ b/arch/sparc/kernel/process_64.c
3870@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3871 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3872 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3873 if (regs->tstate & TSTATE_PRIV)
3874- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3875+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3876 }
3877
3878 void show_regs(struct pt_regs *regs)
3879 {
3880 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3881 regs->tpc, regs->tnpc, regs->y, print_tainted());
3882- printk("TPC: <%pS>\n", (void *) regs->tpc);
3883+ printk("TPC: <%pA>\n", (void *) regs->tpc);
3884 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3885 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3886 regs->u_regs[3]);
3887@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3888 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3889 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3890 regs->u_regs[15]);
3891- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3892+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3893 show_regwindow(regs);
3894 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3895 }
3896@@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3897 ((tp && tp->task) ? tp->task->pid : -1));
3898
3899 if (gp->tstate & TSTATE_PRIV) {
3900- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3901+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3902 (void *) gp->tpc,
3903 (void *) gp->o7,
3904 (void *) gp->i7,
3905diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3906index 42b282f..28ce9f2 100644
3907--- a/arch/sparc/kernel/sys_sparc_32.c
3908+++ b/arch/sparc/kernel/sys_sparc_32.c
3909@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3910 if (ARCH_SUN4C && len > 0x20000000)
3911 return -ENOMEM;
3912 if (!addr)
3913- addr = TASK_UNMAPPED_BASE;
3914+ addr = current->mm->mmap_base;
3915
3916 if (flags & MAP_SHARED)
3917 addr = COLOUR_ALIGN(addr);
3918@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3919 }
3920 if (TASK_SIZE - PAGE_SIZE - len < addr)
3921 return -ENOMEM;
3922- if (!vmm || addr + len <= vmm->vm_start)
3923+ if (check_heap_stack_gap(vmm, addr, len))
3924 return addr;
3925 addr = vmm->vm_end;
3926 if (flags & MAP_SHARED)
3927diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3928index 441521a..b767073 100644
3929--- a/arch/sparc/kernel/sys_sparc_64.c
3930+++ b/arch/sparc/kernel/sys_sparc_64.c
3931@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3932 /* We do not accept a shared mapping if it would violate
3933 * cache aliasing constraints.
3934 */
3935- if ((flags & MAP_SHARED) &&
3936+ if ((filp || (flags & MAP_SHARED)) &&
3937 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3938 return -EINVAL;
3939 return addr;
3940@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3941 if (filp || (flags & MAP_SHARED))
3942 do_color_align = 1;
3943
3944+#ifdef CONFIG_PAX_RANDMMAP
3945+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3946+#endif
3947+
3948 if (addr) {
3949 if (do_color_align)
3950 addr = COLOUR_ALIGN(addr, pgoff);
3951@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3952 addr = PAGE_ALIGN(addr);
3953
3954 vma = find_vma(mm, addr);
3955- if (task_size - len >= addr &&
3956- (!vma || addr + len <= vma->vm_start))
3957+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3958 return addr;
3959 }
3960
3961 if (len > mm->cached_hole_size) {
3962- start_addr = addr = mm->free_area_cache;
3963+ start_addr = addr = mm->free_area_cache;
3964 } else {
3965- start_addr = addr = TASK_UNMAPPED_BASE;
3966+ start_addr = addr = mm->mmap_base;
3967 mm->cached_hole_size = 0;
3968 }
3969
3970@@ -174,14 +177,14 @@ full_search:
3971 vma = find_vma(mm, VA_EXCLUDE_END);
3972 }
3973 if (unlikely(task_size < addr)) {
3974- if (start_addr != TASK_UNMAPPED_BASE) {
3975- start_addr = addr = TASK_UNMAPPED_BASE;
3976+ if (start_addr != mm->mmap_base) {
3977+ start_addr = addr = mm->mmap_base;
3978 mm->cached_hole_size = 0;
3979 goto full_search;
3980 }
3981 return -ENOMEM;
3982 }
3983- if (likely(!vma || addr + len <= vma->vm_start)) {
3984+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3985 /*
3986 * Remember the place where we stopped the search:
3987 */
3988@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3989 /* We do not accept a shared mapping if it would violate
3990 * cache aliasing constraints.
3991 */
3992- if ((flags & MAP_SHARED) &&
3993+ if ((filp || (flags & MAP_SHARED)) &&
3994 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3995 return -EINVAL;
3996 return addr;
3997@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3998 addr = PAGE_ALIGN(addr);
3999
4000 vma = find_vma(mm, addr);
4001- if (task_size - len >= addr &&
4002- (!vma || addr + len <= vma->vm_start))
4003+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4004 return addr;
4005 }
4006
4007@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 /* make sure it can fit in the remaining address space */
4009 if (likely(addr > len)) {
4010 vma = find_vma(mm, addr-len);
4011- if (!vma || addr <= vma->vm_start) {
4012+ if (check_heap_stack_gap(vma, addr - len, len)) {
4013 /* remember the address as a hint for next time */
4014 return (mm->free_area_cache = addr-len);
4015 }
4016@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4017 if (unlikely(mm->mmap_base < len))
4018 goto bottomup;
4019
4020- addr = mm->mmap_base-len;
4021- if (do_color_align)
4022- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4023+ addr = mm->mmap_base - len;
4024
4025 do {
4026+ if (do_color_align)
4027+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4028 /*
4029 * Lookup failure means no vma is above this address,
4030 * else if new region fits below vma->vm_start,
4031 * return with success:
4032 */
4033 vma = find_vma(mm, addr);
4034- if (likely(!vma || addr+len <= vma->vm_start)) {
4035+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4036 /* remember the address as a hint for next time */
4037 return (mm->free_area_cache = addr);
4038 }
4039@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4040 mm->cached_hole_size = vma->vm_start - addr;
4041
4042 /* try just below the current vma->vm_start */
4043- addr = vma->vm_start-len;
4044- if (do_color_align)
4045- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4046- } while (likely(len < vma->vm_start));
4047+ addr = skip_heap_stack_gap(vma, len);
4048+ } while (!IS_ERR_VALUE(addr));
4049
4050 bottomup:
4051 /*
4052@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 gap == RLIM_INFINITY ||
4054 sysctl_legacy_va_layout) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4056+
4057+#ifdef CONFIG_PAX_RANDMMAP
4058+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4059+ mm->mmap_base += mm->delta_mmap;
4060+#endif
4061+
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4066 gap = (task_size / 6 * 5);
4067
4068 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4069+
4070+#ifdef CONFIG_PAX_RANDMMAP
4071+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4072+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4073+#endif
4074+
4075 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4076 mm->unmap_area = arch_unmap_area_topdown;
4077 }
4078diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4079index 591f20c..0f1b925 100644
4080--- a/arch/sparc/kernel/traps_32.c
4081+++ b/arch/sparc/kernel/traps_32.c
4082@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
4083 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4084 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4085
4086+extern void gr_handle_kernel_exploit(void);
4087+
4088 void die_if_kernel(char *str, struct pt_regs *regs)
4089 {
4090 static int die_counter;
4091@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4092 count++ < 30 &&
4093 (((unsigned long) rw) >= PAGE_OFFSET) &&
4094 !(((unsigned long) rw) & 0x7)) {
4095- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4096+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4097 (void *) rw->ins[7]);
4098 rw = (struct reg_window32 *)rw->ins[6];
4099 }
4100 }
4101 printk("Instruction DUMP:");
4102 instruction_dump ((unsigned long *) regs->pc);
4103- if(regs->psr & PSR_PS)
4104+ if(regs->psr & PSR_PS) {
4105+ gr_handle_kernel_exploit();
4106 do_exit(SIGKILL);
4107+ }
4108 do_exit(SIGSEGV);
4109 }
4110
4111diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4112index 0cbdaa4..438e4c9 100644
4113--- a/arch/sparc/kernel/traps_64.c
4114+++ b/arch/sparc/kernel/traps_64.c
4115@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4116 i + 1,
4117 p->trapstack[i].tstate, p->trapstack[i].tpc,
4118 p->trapstack[i].tnpc, p->trapstack[i].tt);
4119- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4120+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4121 }
4122 }
4123
4124@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4125
4126 lvl -= 0x100;
4127 if (regs->tstate & TSTATE_PRIV) {
4128+
4129+#ifdef CONFIG_PAX_REFCOUNT
4130+ if (lvl == 6)
4131+ pax_report_refcount_overflow(regs);
4132+#endif
4133+
4134 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4135 die_if_kernel(buffer, regs);
4136 }
4137@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4138 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4139 {
4140 char buffer[32];
4141-
4142+
4143 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4144 0, lvl, SIGTRAP) == NOTIFY_STOP)
4145 return;
4146
4147+#ifdef CONFIG_PAX_REFCOUNT
4148+ if (lvl == 6)
4149+ pax_report_refcount_overflow(regs);
4150+#endif
4151+
4152 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4153
4154 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4155@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4156 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4157 printk("%s" "ERROR(%d): ",
4158 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4159- printk("TPC<%pS>\n", (void *) regs->tpc);
4160+ printk("TPC<%pA>\n", (void *) regs->tpc);
4161 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4162 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4163 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4164@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4165 smp_processor_id(),
4166 (type & 0x1) ? 'I' : 'D',
4167 regs->tpc);
4168- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4169+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4170 panic("Irrecoverable Cheetah+ parity error.");
4171 }
4172
4173@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4174 smp_processor_id(),
4175 (type & 0x1) ? 'I' : 'D',
4176 regs->tpc);
4177- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4178+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4179 }
4180
4181 struct sun4v_error_entry {
4182@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4183
4184 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4185 regs->tpc, tl);
4186- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4187+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4188 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4189- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4190+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4191 (void *) regs->u_regs[UREG_I7]);
4192 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4193 "pte[%lx] error[%lx]\n",
4194@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4195
4196 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4197 regs->tpc, tl);
4198- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4199+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4200 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4201- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4202+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4203 (void *) regs->u_regs[UREG_I7]);
4204 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4205 "pte[%lx] error[%lx]\n",
4206@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4207 fp = (unsigned long)sf->fp + STACK_BIAS;
4208 }
4209
4210- printk(" [%016lx] %pS\n", pc, (void *) pc);
4211+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4213 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4214 int index = tsk->curr_ret_stack;
4215 if (tsk->ret_stack && index >= graph) {
4216 pc = tsk->ret_stack[index - graph].ret;
4217- printk(" [%016lx] %pS\n", pc, (void *) pc);
4218+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4219 graph++;
4220 }
4221 }
4222@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4223 return (struct reg_window *) (fp + STACK_BIAS);
4224 }
4225
4226+extern void gr_handle_kernel_exploit(void);
4227+
4228 void die_if_kernel(char *str, struct pt_regs *regs)
4229 {
4230 static int die_counter;
4231@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4232 while (rw &&
4233 count++ < 30 &&
4234 kstack_valid(tp, (unsigned long) rw)) {
4235- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4236+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4237 (void *) rw->ins[7]);
4238
4239 rw = kernel_stack_up(rw);
4240@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4241 }
4242 user_instruction_dump ((unsigned int __user *) regs->tpc);
4243 }
4244- if (regs->tstate & TSTATE_PRIV)
4245+ if (regs->tstate & TSTATE_PRIV) {
4246+ gr_handle_kernel_exploit();
4247 do_exit(SIGKILL);
4248+ }
4249 do_exit(SIGSEGV);
4250 }
4251 EXPORT_SYMBOL(die_if_kernel);
4252diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4253index 76e4ac1..78f8bb1 100644
4254--- a/arch/sparc/kernel/unaligned_64.c
4255+++ b/arch/sparc/kernel/unaligned_64.c
4256@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4257 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4258
4259 if (__ratelimit(&ratelimit)) {
4260- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4261+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4262 regs->tpc, (void *) regs->tpc);
4263 }
4264 }
4265diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4266index a3fc437..fea9957 100644
4267--- a/arch/sparc/lib/Makefile
4268+++ b/arch/sparc/lib/Makefile
4269@@ -2,7 +2,7 @@
4270 #
4271
4272 asflags-y := -ansi -DST_DIV0=0x02
4273-ccflags-y := -Werror
4274+#ccflags-y := -Werror
4275
4276 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4277 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4278diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4279index 59186e0..f747d7a 100644
4280--- a/arch/sparc/lib/atomic_64.S
4281+++ b/arch/sparc/lib/atomic_64.S
4282@@ -18,7 +18,12 @@
4283 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4284 BACKOFF_SETUP(%o2)
4285 1: lduw [%o1], %g1
4286- add %g1, %o0, %g7
4287+ addcc %g1, %o0, %g7
4288+
4289+#ifdef CONFIG_PAX_REFCOUNT
4290+ tvs %icc, 6
4291+#endif
4292+
4293 cas [%o1], %g1, %g7
4294 cmp %g1, %g7
4295 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4296@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4297 2: BACKOFF_SPIN(%o2, %o3, 1b)
4298 .size atomic_add, .-atomic_add
4299
4300+ .globl atomic_add_unchecked
4301+ .type atomic_add_unchecked,#function
4302+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4303+ BACKOFF_SETUP(%o2)
4304+1: lduw [%o1], %g1
4305+ add %g1, %o0, %g7
4306+ cas [%o1], %g1, %g7
4307+ cmp %g1, %g7
4308+ bne,pn %icc, 2f
4309+ nop
4310+ retl
4311+ nop
4312+2: BACKOFF_SPIN(%o2, %o3, 1b)
4313+ .size atomic_add_unchecked, .-atomic_add_unchecked
4314+
4315 .globl atomic_sub
4316 .type atomic_sub,#function
4317 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4318 BACKOFF_SETUP(%o2)
4319 1: lduw [%o1], %g1
4320- sub %g1, %o0, %g7
4321+ subcc %g1, %o0, %g7
4322+
4323+#ifdef CONFIG_PAX_REFCOUNT
4324+ tvs %icc, 6
4325+#endif
4326+
4327 cas [%o1], %g1, %g7
4328 cmp %g1, %g7
4329 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4330@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4331 2: BACKOFF_SPIN(%o2, %o3, 1b)
4332 .size atomic_sub, .-atomic_sub
4333
4334+ .globl atomic_sub_unchecked
4335+ .type atomic_sub_unchecked,#function
4336+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4337+ BACKOFF_SETUP(%o2)
4338+1: lduw [%o1], %g1
4339+ sub %g1, %o0, %g7
4340+ cas [%o1], %g1, %g7
4341+ cmp %g1, %g7
4342+ bne,pn %icc, 2f
4343+ nop
4344+ retl
4345+ nop
4346+2: BACKOFF_SPIN(%o2, %o3, 1b)
4347+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4348+
4349 .globl atomic_add_ret
4350 .type atomic_add_ret,#function
4351 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4352 BACKOFF_SETUP(%o2)
4353 1: lduw [%o1], %g1
4354- add %g1, %o0, %g7
4355+ addcc %g1, %o0, %g7
4356+
4357+#ifdef CONFIG_PAX_REFCOUNT
4358+ tvs %icc, 6
4359+#endif
4360+
4361 cas [%o1], %g1, %g7
4362 cmp %g1, %g7
4363 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4364@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4365 2: BACKOFF_SPIN(%o2, %o3, 1b)
4366 .size atomic_add_ret, .-atomic_add_ret
4367
4368+ .globl atomic_add_ret_unchecked
4369+ .type atomic_add_ret_unchecked,#function
4370+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4371+ BACKOFF_SETUP(%o2)
4372+1: lduw [%o1], %g1
4373+ addcc %g1, %o0, %g7
4374+ cas [%o1], %g1, %g7
4375+ cmp %g1, %g7
4376+ bne,pn %icc, 2f
4377+ add %g7, %o0, %g7
4378+ sra %g7, 0, %o0
4379+ retl
4380+ nop
4381+2: BACKOFF_SPIN(%o2, %o3, 1b)
4382+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4383+
4384 .globl atomic_sub_ret
4385 .type atomic_sub_ret,#function
4386 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4387 BACKOFF_SETUP(%o2)
4388 1: lduw [%o1], %g1
4389- sub %g1, %o0, %g7
4390+ subcc %g1, %o0, %g7
4391+
4392+#ifdef CONFIG_PAX_REFCOUNT
4393+ tvs %icc, 6
4394+#endif
4395+
4396 cas [%o1], %g1, %g7
4397 cmp %g1, %g7
4398 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4399@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4400 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4401 BACKOFF_SETUP(%o2)
4402 1: ldx [%o1], %g1
4403- add %g1, %o0, %g7
4404+ addcc %g1, %o0, %g7
4405+
4406+#ifdef CONFIG_PAX_REFCOUNT
4407+ tvs %xcc, 6
4408+#endif
4409+
4410 casx [%o1], %g1, %g7
4411 cmp %g1, %g7
4412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4413@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4414 2: BACKOFF_SPIN(%o2, %o3, 1b)
4415 .size atomic64_add, .-atomic64_add
4416
4417+ .globl atomic64_add_unchecked
4418+ .type atomic64_add_unchecked,#function
4419+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4420+ BACKOFF_SETUP(%o2)
4421+1: ldx [%o1], %g1
4422+ addcc %g1, %o0, %g7
4423+ casx [%o1], %g1, %g7
4424+ cmp %g1, %g7
4425+ bne,pn %xcc, 2f
4426+ nop
4427+ retl
4428+ nop
4429+2: BACKOFF_SPIN(%o2, %o3, 1b)
4430+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4431+
4432 .globl atomic64_sub
4433 .type atomic64_sub,#function
4434 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4435 BACKOFF_SETUP(%o2)
4436 1: ldx [%o1], %g1
4437- sub %g1, %o0, %g7
4438+ subcc %g1, %o0, %g7
4439+
4440+#ifdef CONFIG_PAX_REFCOUNT
4441+ tvs %xcc, 6
4442+#endif
4443+
4444 casx [%o1], %g1, %g7
4445 cmp %g1, %g7
4446 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4447@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4448 2: BACKOFF_SPIN(%o2, %o3, 1b)
4449 .size atomic64_sub, .-atomic64_sub
4450
4451+ .globl atomic64_sub_unchecked
4452+ .type atomic64_sub_unchecked,#function
4453+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4454+ BACKOFF_SETUP(%o2)
4455+1: ldx [%o1], %g1
4456+ subcc %g1, %o0, %g7
4457+ casx [%o1], %g1, %g7
4458+ cmp %g1, %g7
4459+ bne,pn %xcc, 2f
4460+ nop
4461+ retl
4462+ nop
4463+2: BACKOFF_SPIN(%o2, %o3, 1b)
4464+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4465+
4466 .globl atomic64_add_ret
4467 .type atomic64_add_ret,#function
4468 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4469 BACKOFF_SETUP(%o2)
4470 1: ldx [%o1], %g1
4471- add %g1, %o0, %g7
4472+ addcc %g1, %o0, %g7
4473+
4474+#ifdef CONFIG_PAX_REFCOUNT
4475+ tvs %xcc, 6
4476+#endif
4477+
4478 casx [%o1], %g1, %g7
4479 cmp %g1, %g7
4480 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4481@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4482 2: BACKOFF_SPIN(%o2, %o3, 1b)
4483 .size atomic64_add_ret, .-atomic64_add_ret
4484
4485+ .globl atomic64_add_ret_unchecked
4486+ .type atomic64_add_ret_unchecked,#function
4487+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4488+ BACKOFF_SETUP(%o2)
4489+1: ldx [%o1], %g1
4490+ addcc %g1, %o0, %g7
4491+ casx [%o1], %g1, %g7
4492+ cmp %g1, %g7
4493+ bne,pn %xcc, 2f
4494+ add %g7, %o0, %g7
4495+ mov %g7, %o0
4496+ retl
4497+ nop
4498+2: BACKOFF_SPIN(%o2, %o3, 1b)
4499+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4500+
4501 .globl atomic64_sub_ret
4502 .type atomic64_sub_ret,#function
4503 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4504 BACKOFF_SETUP(%o2)
4505 1: ldx [%o1], %g1
4506- sub %g1, %o0, %g7
4507+ subcc %g1, %o0, %g7
4508+
4509+#ifdef CONFIG_PAX_REFCOUNT
4510+ tvs %xcc, 6
4511+#endif
4512+
4513 casx [%o1], %g1, %g7
4514 cmp %g1, %g7
4515 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4516diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4517index 1b30bb3..b4a16c7 100644
4518--- a/arch/sparc/lib/ksyms.c
4519+++ b/arch/sparc/lib/ksyms.c
4520@@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4521
4522 /* Atomic counter implementation. */
4523 EXPORT_SYMBOL(atomic_add);
4524+EXPORT_SYMBOL(atomic_add_unchecked);
4525 EXPORT_SYMBOL(atomic_add_ret);
4526+EXPORT_SYMBOL(atomic_add_ret_unchecked);
4527 EXPORT_SYMBOL(atomic_sub);
4528+EXPORT_SYMBOL(atomic_sub_unchecked);
4529 EXPORT_SYMBOL(atomic_sub_ret);
4530 EXPORT_SYMBOL(atomic64_add);
4531+EXPORT_SYMBOL(atomic64_add_unchecked);
4532 EXPORT_SYMBOL(atomic64_add_ret);
4533+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4534 EXPORT_SYMBOL(atomic64_sub);
4535+EXPORT_SYMBOL(atomic64_sub_unchecked);
4536 EXPORT_SYMBOL(atomic64_sub_ret);
4537
4538 /* Atomic bit operations. */
4539diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4540index 301421c..e2535d1 100644
4541--- a/arch/sparc/mm/Makefile
4542+++ b/arch/sparc/mm/Makefile
4543@@ -2,7 +2,7 @@
4544 #
4545
4546 asflags-y := -ansi
4547-ccflags-y := -Werror
4548+#ccflags-y := -Werror
4549
4550 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4551 obj-y += fault_$(BITS).o
4552diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4553index 8023fd7..c8e89e9 100644
4554--- a/arch/sparc/mm/fault_32.c
4555+++ b/arch/sparc/mm/fault_32.c
4556@@ -21,6 +21,9 @@
4557 #include <linux/perf_event.h>
4558 #include <linux/interrupt.h>
4559 #include <linux/kdebug.h>
4560+#include <linux/slab.h>
4561+#include <linux/pagemap.h>
4562+#include <linux/compiler.h>
4563
4564 #include <asm/system.h>
4565 #include <asm/page.h>
4566@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4567 return safe_compute_effective_address(regs, insn);
4568 }
4569
4570+#ifdef CONFIG_PAX_PAGEEXEC
4571+#ifdef CONFIG_PAX_DLRESOLVE
4572+static void pax_emuplt_close(struct vm_area_struct *vma)
4573+{
4574+ vma->vm_mm->call_dl_resolve = 0UL;
4575+}
4576+
4577+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4578+{
4579+ unsigned int *kaddr;
4580+
4581+ vmf->page = alloc_page(GFP_HIGHUSER);
4582+ if (!vmf->page)
4583+ return VM_FAULT_OOM;
4584+
4585+ kaddr = kmap(vmf->page);
4586+ memset(kaddr, 0, PAGE_SIZE);
4587+ kaddr[0] = 0x9DE3BFA8U; /* save */
4588+ flush_dcache_page(vmf->page);
4589+ kunmap(vmf->page);
4590+ return VM_FAULT_MAJOR;
4591+}
4592+
4593+static const struct vm_operations_struct pax_vm_ops = {
4594+ .close = pax_emuplt_close,
4595+ .fault = pax_emuplt_fault
4596+};
4597+
4598+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4599+{
4600+ int ret;
4601+
4602+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4603+ vma->vm_mm = current->mm;
4604+ vma->vm_start = addr;
4605+ vma->vm_end = addr + PAGE_SIZE;
4606+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4607+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4608+ vma->vm_ops = &pax_vm_ops;
4609+
4610+ ret = insert_vm_struct(current->mm, vma);
4611+ if (ret)
4612+ return ret;
4613+
4614+ ++current->mm->total_vm;
4615+ return 0;
4616+}
4617+#endif
4618+
4619+/*
4620+ * PaX: decide what to do with offenders (regs->pc = fault address)
4621+ *
4622+ * returns 1 when task should be killed
4623+ * 2 when patched PLT trampoline was detected
4624+ * 3 when unpatched PLT trampoline was detected
4625+ */
4626+static int pax_handle_fetch_fault(struct pt_regs *regs)
4627+{
4628+
4629+#ifdef CONFIG_PAX_EMUPLT
4630+ int err;
4631+
4632+ do { /* PaX: patched PLT emulation #1 */
4633+ unsigned int sethi1, sethi2, jmpl;
4634+
4635+ err = get_user(sethi1, (unsigned int *)regs->pc);
4636+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4637+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4638+
4639+ if (err)
4640+ break;
4641+
4642+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4643+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4644+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4645+ {
4646+ unsigned int addr;
4647+
4648+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4649+ addr = regs->u_regs[UREG_G1];
4650+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4651+ regs->pc = addr;
4652+ regs->npc = addr+4;
4653+ return 2;
4654+ }
4655+ } while (0);
4656+
4657+ { /* PaX: patched PLT emulation #2 */
4658+ unsigned int ba;
4659+
4660+ err = get_user(ba, (unsigned int *)regs->pc);
4661+
4662+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4663+ unsigned int addr;
4664+
4665+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4666+ regs->pc = addr;
4667+ regs->npc = addr+4;
4668+ return 2;
4669+ }
4670+ }
4671+
4672+ do { /* PaX: patched PLT emulation #3 */
4673+ unsigned int sethi, jmpl, nop;
4674+
4675+ err = get_user(sethi, (unsigned int *)regs->pc);
4676+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4677+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4678+
4679+ if (err)
4680+ break;
4681+
4682+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4683+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4684+ nop == 0x01000000U)
4685+ {
4686+ unsigned int addr;
4687+
4688+ addr = (sethi & 0x003FFFFFU) << 10;
4689+ regs->u_regs[UREG_G1] = addr;
4690+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4691+ regs->pc = addr;
4692+ regs->npc = addr+4;
4693+ return 2;
4694+ }
4695+ } while (0);
4696+
4697+ do { /* PaX: unpatched PLT emulation step 1 */
4698+ unsigned int sethi, ba, nop;
4699+
4700+ err = get_user(sethi, (unsigned int *)regs->pc);
4701+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4702+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4703+
4704+ if (err)
4705+ break;
4706+
4707+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4708+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4709+ nop == 0x01000000U)
4710+ {
4711+ unsigned int addr, save, call;
4712+
4713+ if ((ba & 0xFFC00000U) == 0x30800000U)
4714+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4715+ else
4716+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4717+
4718+ err = get_user(save, (unsigned int *)addr);
4719+ err |= get_user(call, (unsigned int *)(addr+4));
4720+ err |= get_user(nop, (unsigned int *)(addr+8));
4721+ if (err)
4722+ break;
4723+
4724+#ifdef CONFIG_PAX_DLRESOLVE
4725+ if (save == 0x9DE3BFA8U &&
4726+ (call & 0xC0000000U) == 0x40000000U &&
4727+ nop == 0x01000000U)
4728+ {
4729+ struct vm_area_struct *vma;
4730+ unsigned long call_dl_resolve;
4731+
4732+ down_read(&current->mm->mmap_sem);
4733+ call_dl_resolve = current->mm->call_dl_resolve;
4734+ up_read(&current->mm->mmap_sem);
4735+ if (likely(call_dl_resolve))
4736+ goto emulate;
4737+
4738+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4739+
4740+ down_write(&current->mm->mmap_sem);
4741+ if (current->mm->call_dl_resolve) {
4742+ call_dl_resolve = current->mm->call_dl_resolve;
4743+ up_write(&current->mm->mmap_sem);
4744+ if (vma)
4745+ kmem_cache_free(vm_area_cachep, vma);
4746+ goto emulate;
4747+ }
4748+
4749+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4750+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4751+ up_write(&current->mm->mmap_sem);
4752+ if (vma)
4753+ kmem_cache_free(vm_area_cachep, vma);
4754+ return 1;
4755+ }
4756+
4757+ if (pax_insert_vma(vma, call_dl_resolve)) {
4758+ up_write(&current->mm->mmap_sem);
4759+ kmem_cache_free(vm_area_cachep, vma);
4760+ return 1;
4761+ }
4762+
4763+ current->mm->call_dl_resolve = call_dl_resolve;
4764+ up_write(&current->mm->mmap_sem);
4765+
4766+emulate:
4767+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4768+ regs->pc = call_dl_resolve;
4769+ regs->npc = addr+4;
4770+ return 3;
4771+ }
4772+#endif
4773+
4774+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4775+ if ((save & 0xFFC00000U) == 0x05000000U &&
4776+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4777+ nop == 0x01000000U)
4778+ {
4779+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4780+ regs->u_regs[UREG_G2] = addr + 4;
4781+ addr = (save & 0x003FFFFFU) << 10;
4782+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4783+ regs->pc = addr;
4784+ regs->npc = addr+4;
4785+ return 3;
4786+ }
4787+ }
4788+ } while (0);
4789+
4790+ do { /* PaX: unpatched PLT emulation step 2 */
4791+ unsigned int save, call, nop;
4792+
4793+ err = get_user(save, (unsigned int *)(regs->pc-4));
4794+ err |= get_user(call, (unsigned int *)regs->pc);
4795+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4796+ if (err)
4797+ break;
4798+
4799+ if (save == 0x9DE3BFA8U &&
4800+ (call & 0xC0000000U) == 0x40000000U &&
4801+ nop == 0x01000000U)
4802+ {
4803+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4804+
4805+ regs->u_regs[UREG_RETPC] = regs->pc;
4806+ regs->pc = dl_resolve;
4807+ regs->npc = dl_resolve+4;
4808+ return 3;
4809+ }
4810+ } while (0);
4811+#endif
4812+
4813+ return 1;
4814+}
4815+
4816+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4817+{
4818+ unsigned long i;
4819+
4820+ printk(KERN_ERR "PAX: bytes at PC: ");
4821+ for (i = 0; i < 8; i++) {
4822+ unsigned int c;
4823+ if (get_user(c, (unsigned int *)pc+i))
4824+ printk(KERN_CONT "???????? ");
4825+ else
4826+ printk(KERN_CONT "%08x ", c);
4827+ }
4828+ printk("\n");
4829+}
4830+#endif
4831+
4832 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4833 int text_fault)
4834 {
4835@@ -280,6 +545,24 @@ good_area:
4836 if(!(vma->vm_flags & VM_WRITE))
4837 goto bad_area;
4838 } else {
4839+
4840+#ifdef CONFIG_PAX_PAGEEXEC
4841+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4842+ up_read(&mm->mmap_sem);
4843+ switch (pax_handle_fetch_fault(regs)) {
4844+
4845+#ifdef CONFIG_PAX_EMUPLT
4846+ case 2:
4847+ case 3:
4848+ return;
4849+#endif
4850+
4851+ }
4852+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4853+ do_group_exit(SIGKILL);
4854+ }
4855+#endif
4856+
4857 /* Allow reads even for write-only mappings */
4858 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4859 goto bad_area;
4860diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4861index 504c062..6fcb9c6 100644
4862--- a/arch/sparc/mm/fault_64.c
4863+++ b/arch/sparc/mm/fault_64.c
4864@@ -21,6 +21,9 @@
4865 #include <linux/kprobes.h>
4866 #include <linux/kdebug.h>
4867 #include <linux/percpu.h>
4868+#include <linux/slab.h>
4869+#include <linux/pagemap.h>
4870+#include <linux/compiler.h>
4871
4872 #include <asm/page.h>
4873 #include <asm/pgtable.h>
4874@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4875 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4876 regs->tpc);
4877 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4878- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4879+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4880 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4881 dump_stack();
4882 unhandled_fault(regs->tpc, current, regs);
4883@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4884 show_regs(regs);
4885 }
4886
4887+#ifdef CONFIG_PAX_PAGEEXEC
4888+#ifdef CONFIG_PAX_DLRESOLVE
4889+static void pax_emuplt_close(struct vm_area_struct *vma)
4890+{
4891+ vma->vm_mm->call_dl_resolve = 0UL;
4892+}
4893+
4894+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4895+{
4896+ unsigned int *kaddr;
4897+
4898+ vmf->page = alloc_page(GFP_HIGHUSER);
4899+ if (!vmf->page)
4900+ return VM_FAULT_OOM;
4901+
4902+ kaddr = kmap(vmf->page);
4903+ memset(kaddr, 0, PAGE_SIZE);
4904+ kaddr[0] = 0x9DE3BFA8U; /* save */
4905+ flush_dcache_page(vmf->page);
4906+ kunmap(vmf->page);
4907+ return VM_FAULT_MAJOR;
4908+}
4909+
4910+static const struct vm_operations_struct pax_vm_ops = {
4911+ .close = pax_emuplt_close,
4912+ .fault = pax_emuplt_fault
4913+};
4914+
4915+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4916+{
4917+ int ret;
4918+
4919+ INIT_LIST_HEAD(&vma->anon_vma_chain);
4920+ vma->vm_mm = current->mm;
4921+ vma->vm_start = addr;
4922+ vma->vm_end = addr + PAGE_SIZE;
4923+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4924+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4925+ vma->vm_ops = &pax_vm_ops;
4926+
4927+ ret = insert_vm_struct(current->mm, vma);
4928+ if (ret)
4929+ return ret;
4930+
4931+ ++current->mm->total_vm;
4932+ return 0;
4933+}
4934+#endif
4935+
4936+/*
4937+ * PaX: decide what to do with offenders (regs->tpc = fault address)
4938+ *
4939+ * returns 1 when task should be killed
4940+ * 2 when patched PLT trampoline was detected
4941+ * 3 when unpatched PLT trampoline was detected
4942+ */
4943+static int pax_handle_fetch_fault(struct pt_regs *regs)
4944+{
4945+
4946+#ifdef CONFIG_PAX_EMUPLT
4947+ int err;
4948+
4949+ do { /* PaX: patched PLT emulation #1 */
4950+ unsigned int sethi1, sethi2, jmpl;
4951+
4952+ err = get_user(sethi1, (unsigned int *)regs->tpc);
4953+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4954+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4955+
4956+ if (err)
4957+ break;
4958+
4959+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4960+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4961+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4962+ {
4963+ unsigned long addr;
4964+
4965+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4966+ addr = regs->u_regs[UREG_G1];
4967+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4968+
4969+ if (test_thread_flag(TIF_32BIT))
4970+ addr &= 0xFFFFFFFFUL;
4971+
4972+ regs->tpc = addr;
4973+ regs->tnpc = addr+4;
4974+ return 2;
4975+ }
4976+ } while (0);
4977+
4978+ { /* PaX: patched PLT emulation #2 */
4979+ unsigned int ba;
4980+
4981+ err = get_user(ba, (unsigned int *)regs->tpc);
4982+
4983+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4984+ unsigned long addr;
4985+
4986+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4987+
4988+ if (test_thread_flag(TIF_32BIT))
4989+ addr &= 0xFFFFFFFFUL;
4990+
4991+ regs->tpc = addr;
4992+ regs->tnpc = addr+4;
4993+ return 2;
4994+ }
4995+ }
4996+
4997+ do { /* PaX: patched PLT emulation #3 */
4998+ unsigned int sethi, jmpl, nop;
4999+
5000+ err = get_user(sethi, (unsigned int *)regs->tpc);
5001+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5002+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5003+
5004+ if (err)
5005+ break;
5006+
5007+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5008+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5009+ nop == 0x01000000U)
5010+ {
5011+ unsigned long addr;
5012+
5013+ addr = (sethi & 0x003FFFFFU) << 10;
5014+ regs->u_regs[UREG_G1] = addr;
5015+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5016+
5017+ if (test_thread_flag(TIF_32BIT))
5018+ addr &= 0xFFFFFFFFUL;
5019+
5020+ regs->tpc = addr;
5021+ regs->tnpc = addr+4;
5022+ return 2;
5023+ }
5024+ } while (0);
5025+
5026+ do { /* PaX: patched PLT emulation #4 */
5027+ unsigned int sethi, mov1, call, mov2;
5028+
5029+ err = get_user(sethi, (unsigned int *)regs->tpc);
5030+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5031+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5032+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5033+
5034+ if (err)
5035+ break;
5036+
5037+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5038+ mov1 == 0x8210000FU &&
5039+ (call & 0xC0000000U) == 0x40000000U &&
5040+ mov2 == 0x9E100001U)
5041+ {
5042+ unsigned long addr;
5043+
5044+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5045+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5046+
5047+ if (test_thread_flag(TIF_32BIT))
5048+ addr &= 0xFFFFFFFFUL;
5049+
5050+ regs->tpc = addr;
5051+ regs->tnpc = addr+4;
5052+ return 2;
5053+ }
5054+ } while (0);
5055+
5056+ do { /* PaX: patched PLT emulation #5 */
5057+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5058+
5059+ err = get_user(sethi, (unsigned int *)regs->tpc);
5060+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5061+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5062+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5063+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5064+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5065+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5066+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5067+
5068+ if (err)
5069+ break;
5070+
5071+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5072+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5073+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5074+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5075+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5076+ sllx == 0x83287020U &&
5077+ jmpl == 0x81C04005U &&
5078+ nop == 0x01000000U)
5079+ {
5080+ unsigned long addr;
5081+
5082+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5083+ regs->u_regs[UREG_G1] <<= 32;
5084+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5085+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5086+ regs->tpc = addr;
5087+ regs->tnpc = addr+4;
5088+ return 2;
5089+ }
5090+ } while (0);
5091+
5092+ do { /* PaX: patched PLT emulation #6 */
5093+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5094+
5095+ err = get_user(sethi, (unsigned int *)regs->tpc);
5096+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5097+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5098+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5099+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5100+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5101+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5102+
5103+ if (err)
5104+ break;
5105+
5106+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5107+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5108+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5109+ sllx == 0x83287020U &&
5110+ (or & 0xFFFFE000U) == 0x8A116000U &&
5111+ jmpl == 0x81C04005U &&
5112+ nop == 0x01000000U)
5113+ {
5114+ unsigned long addr;
5115+
5116+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5117+ regs->u_regs[UREG_G1] <<= 32;
5118+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5119+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5120+ regs->tpc = addr;
5121+ regs->tnpc = addr+4;
5122+ return 2;
5123+ }
5124+ } while (0);
5125+
5126+ do { /* PaX: unpatched PLT emulation step 1 */
5127+ unsigned int sethi, ba, nop;
5128+
5129+ err = get_user(sethi, (unsigned int *)regs->tpc);
5130+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5131+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5132+
5133+ if (err)
5134+ break;
5135+
5136+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5137+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5138+ nop == 0x01000000U)
5139+ {
5140+ unsigned long addr;
5141+ unsigned int save, call;
5142+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5143+
5144+ if ((ba & 0xFFC00000U) == 0x30800000U)
5145+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5146+ else
5147+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5148+
5149+ if (test_thread_flag(TIF_32BIT))
5150+ addr &= 0xFFFFFFFFUL;
5151+
5152+ err = get_user(save, (unsigned int *)addr);
5153+ err |= get_user(call, (unsigned int *)(addr+4));
5154+ err |= get_user(nop, (unsigned int *)(addr+8));
5155+ if (err)
5156+ break;
5157+
5158+#ifdef CONFIG_PAX_DLRESOLVE
5159+ if (save == 0x9DE3BFA8U &&
5160+ (call & 0xC0000000U) == 0x40000000U &&
5161+ nop == 0x01000000U)
5162+ {
5163+ struct vm_area_struct *vma;
5164+ unsigned long call_dl_resolve;
5165+
5166+ down_read(&current->mm->mmap_sem);
5167+ call_dl_resolve = current->mm->call_dl_resolve;
5168+ up_read(&current->mm->mmap_sem);
5169+ if (likely(call_dl_resolve))
5170+ goto emulate;
5171+
5172+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5173+
5174+ down_write(&current->mm->mmap_sem);
5175+ if (current->mm->call_dl_resolve) {
5176+ call_dl_resolve = current->mm->call_dl_resolve;
5177+ up_write(&current->mm->mmap_sem);
5178+ if (vma)
5179+ kmem_cache_free(vm_area_cachep, vma);
5180+ goto emulate;
5181+ }
5182+
5183+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5184+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5185+ up_write(&current->mm->mmap_sem);
5186+ if (vma)
5187+ kmem_cache_free(vm_area_cachep, vma);
5188+ return 1;
5189+ }
5190+
5191+ if (pax_insert_vma(vma, call_dl_resolve)) {
5192+ up_write(&current->mm->mmap_sem);
5193+ kmem_cache_free(vm_area_cachep, vma);
5194+ return 1;
5195+ }
5196+
5197+ current->mm->call_dl_resolve = call_dl_resolve;
5198+ up_write(&current->mm->mmap_sem);
5199+
5200+emulate:
5201+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5202+ regs->tpc = call_dl_resolve;
5203+ regs->tnpc = addr+4;
5204+ return 3;
5205+ }
5206+#endif
5207+
5208+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5209+ if ((save & 0xFFC00000U) == 0x05000000U &&
5210+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5211+ nop == 0x01000000U)
5212+ {
5213+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5214+ regs->u_regs[UREG_G2] = addr + 4;
5215+ addr = (save & 0x003FFFFFU) << 10;
5216+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5217+
5218+ if (test_thread_flag(TIF_32BIT))
5219+ addr &= 0xFFFFFFFFUL;
5220+
5221+ regs->tpc = addr;
5222+ regs->tnpc = addr+4;
5223+ return 3;
5224+ }
5225+
5226+ /* PaX: 64-bit PLT stub */
5227+ err = get_user(sethi1, (unsigned int *)addr);
5228+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5229+ err |= get_user(or1, (unsigned int *)(addr+8));
5230+ err |= get_user(or2, (unsigned int *)(addr+12));
5231+ err |= get_user(sllx, (unsigned int *)(addr+16));
5232+ err |= get_user(add, (unsigned int *)(addr+20));
5233+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5234+ err |= get_user(nop, (unsigned int *)(addr+28));
5235+ if (err)
5236+ break;
5237+
5238+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5239+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5240+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5241+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5242+ sllx == 0x89293020U &&
5243+ add == 0x8A010005U &&
5244+ jmpl == 0x89C14000U &&
5245+ nop == 0x01000000U)
5246+ {
5247+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5248+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5249+ regs->u_regs[UREG_G4] <<= 32;
5250+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5251+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5252+ regs->u_regs[UREG_G4] = addr + 24;
5253+ addr = regs->u_regs[UREG_G5];
5254+ regs->tpc = addr;
5255+ regs->tnpc = addr+4;
5256+ return 3;
5257+ }
5258+ }
5259+ } while (0);
5260+
5261+#ifdef CONFIG_PAX_DLRESOLVE
5262+ do { /* PaX: unpatched PLT emulation step 2 */
5263+ unsigned int save, call, nop;
5264+
5265+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5266+ err |= get_user(call, (unsigned int *)regs->tpc);
5267+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5268+ if (err)
5269+ break;
5270+
5271+ if (save == 0x9DE3BFA8U &&
5272+ (call & 0xC0000000U) == 0x40000000U &&
5273+ nop == 0x01000000U)
5274+ {
5275+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5276+
5277+ if (test_thread_flag(TIF_32BIT))
5278+ dl_resolve &= 0xFFFFFFFFUL;
5279+
5280+ regs->u_regs[UREG_RETPC] = regs->tpc;
5281+ regs->tpc = dl_resolve;
5282+ regs->tnpc = dl_resolve+4;
5283+ return 3;
5284+ }
5285+ } while (0);
5286+#endif
5287+
5288+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5289+ unsigned int sethi, ba, nop;
5290+
5291+ err = get_user(sethi, (unsigned int *)regs->tpc);
5292+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5293+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5294+
5295+ if (err)
5296+ break;
5297+
5298+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5299+ (ba & 0xFFF00000U) == 0x30600000U &&
5300+ nop == 0x01000000U)
5301+ {
5302+ unsigned long addr;
5303+
5304+ addr = (sethi & 0x003FFFFFU) << 10;
5305+ regs->u_regs[UREG_G1] = addr;
5306+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5307+
5308+ if (test_thread_flag(TIF_32BIT))
5309+ addr &= 0xFFFFFFFFUL;
5310+
5311+ regs->tpc = addr;
5312+ regs->tnpc = addr+4;
5313+ return 2;
5314+ }
5315+ } while (0);
5316+
5317+#endif
5318+
5319+ return 1;
5320+}
5321+
5322+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5323+{
5324+ unsigned long i;
5325+
5326+ printk(KERN_ERR "PAX: bytes at PC: ");
5327+ for (i = 0; i < 8; i++) {
5328+ unsigned int c;
5329+ if (get_user(c, (unsigned int *)pc+i))
5330+ printk(KERN_CONT "???????? ");
5331+ else
5332+ printk(KERN_CONT "%08x ", c);
5333+ }
5334+ printk("\n");
5335+}
5336+#endif
5337+
5338 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5339 {
5340 struct mm_struct *mm = current->mm;
5341@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5342 if (!vma)
5343 goto bad_area;
5344
5345+#ifdef CONFIG_PAX_PAGEEXEC
5346+ /* PaX: detect ITLB misses on non-exec pages */
5347+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5348+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5349+ {
5350+ if (address != regs->tpc)
5351+ goto good_area;
5352+
5353+ up_read(&mm->mmap_sem);
5354+ switch (pax_handle_fetch_fault(regs)) {
5355+
5356+#ifdef CONFIG_PAX_EMUPLT
5357+ case 2:
5358+ case 3:
5359+ return;
5360+#endif
5361+
5362+ }
5363+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5364+ do_group_exit(SIGKILL);
5365+ }
5366+#endif
5367+
5368 /* Pure DTLB misses do not tell us whether the fault causing
5369 * load/store/atomic was a write or not, it only says that there
5370 * was no match. So in such a case we (carefully) read the
5371diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5372index 07e1453..0a7d9e9 100644
5373--- a/arch/sparc/mm/hugetlbpage.c
5374+++ b/arch/sparc/mm/hugetlbpage.c
5375@@ -67,7 +67,7 @@ full_search:
5376 }
5377 return -ENOMEM;
5378 }
5379- if (likely(!vma || addr + len <= vma->vm_start)) {
5380+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5381 /*
5382 * Remember the place where we stopped the search:
5383 */
5384@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5385 /* make sure it can fit in the remaining address space */
5386 if (likely(addr > len)) {
5387 vma = find_vma(mm, addr-len);
5388- if (!vma || addr <= vma->vm_start) {
5389+ if (check_heap_stack_gap(vma, addr - len, len)) {
5390 /* remember the address as a hint for next time */
5391 return (mm->free_area_cache = addr-len);
5392 }
5393@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5394 if (unlikely(mm->mmap_base < len))
5395 goto bottomup;
5396
5397- addr = (mm->mmap_base-len) & HPAGE_MASK;
5398+ addr = mm->mmap_base - len;
5399
5400 do {
5401+ addr &= HPAGE_MASK;
5402 /*
5403 * Lookup failure means no vma is above this address,
5404 * else if new region fits below vma->vm_start,
5405 * return with success:
5406 */
5407 vma = find_vma(mm, addr);
5408- if (likely(!vma || addr+len <= vma->vm_start)) {
5409+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5410 /* remember the address as a hint for next time */
5411 return (mm->free_area_cache = addr);
5412 }
5413@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5414 mm->cached_hole_size = vma->vm_start - addr;
5415
5416 /* try just below the current vma->vm_start */
5417- addr = (vma->vm_start-len) & HPAGE_MASK;
5418- } while (likely(len < vma->vm_start));
5419+ addr = skip_heap_stack_gap(vma, len);
5420+ } while (!IS_ERR_VALUE(addr));
5421
5422 bottomup:
5423 /*
5424@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5425 if (addr) {
5426 addr = ALIGN(addr, HPAGE_SIZE);
5427 vma = find_vma(mm, addr);
5428- if (task_size - len >= addr &&
5429- (!vma || addr + len <= vma->vm_start))
5430+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5431 return addr;
5432 }
5433 if (mm->get_unmapped_area == arch_get_unmapped_area)
5434diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5435index 7b00de6..78239f4 100644
5436--- a/arch/sparc/mm/init_32.c
5437+++ b/arch/sparc/mm/init_32.c
5438@@ -316,6 +316,9 @@ extern void device_scan(void);
5439 pgprot_t PAGE_SHARED __read_mostly;
5440 EXPORT_SYMBOL(PAGE_SHARED);
5441
5442+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5443+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5444+
5445 void __init paging_init(void)
5446 {
5447 switch(sparc_cpu_model) {
5448@@ -344,17 +347,17 @@ void __init paging_init(void)
5449
5450 /* Initialize the protection map with non-constant, MMU dependent values. */
5451 protection_map[0] = PAGE_NONE;
5452- protection_map[1] = PAGE_READONLY;
5453- protection_map[2] = PAGE_COPY;
5454- protection_map[3] = PAGE_COPY;
5455+ protection_map[1] = PAGE_READONLY_NOEXEC;
5456+ protection_map[2] = PAGE_COPY_NOEXEC;
5457+ protection_map[3] = PAGE_COPY_NOEXEC;
5458 protection_map[4] = PAGE_READONLY;
5459 protection_map[5] = PAGE_READONLY;
5460 protection_map[6] = PAGE_COPY;
5461 protection_map[7] = PAGE_COPY;
5462 protection_map[8] = PAGE_NONE;
5463- protection_map[9] = PAGE_READONLY;
5464- protection_map[10] = PAGE_SHARED;
5465- protection_map[11] = PAGE_SHARED;
5466+ protection_map[9] = PAGE_READONLY_NOEXEC;
5467+ protection_map[10] = PAGE_SHARED_NOEXEC;
5468+ protection_map[11] = PAGE_SHARED_NOEXEC;
5469 protection_map[12] = PAGE_READONLY;
5470 protection_map[13] = PAGE_READONLY;
5471 protection_map[14] = PAGE_SHARED;
5472diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5473index cbef74e..c38fead 100644
5474--- a/arch/sparc/mm/srmmu.c
5475+++ b/arch/sparc/mm/srmmu.c
5476@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5477 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5478 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5479 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5480+
5481+#ifdef CONFIG_PAX_PAGEEXEC
5482+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5483+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5484+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5485+#endif
5486+
5487 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5488 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5489
5490diff --git a/arch/um/Makefile b/arch/um/Makefile
5491index 7730af6..cce5b19 100644
5492--- a/arch/um/Makefile
5493+++ b/arch/um/Makefile
5494@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
5497
5498+ifdef CONSTIFY_PLUGIN
5499+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5500+endif
5501+
5502 #This will adjust *FLAGS accordingly to the platform.
5503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
5504
5505diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5506index 6c03acd..a5e0215 100644
5507--- a/arch/um/include/asm/kmap_types.h
5508+++ b/arch/um/include/asm/kmap_types.h
5509@@ -23,6 +23,7 @@ enum km_type {
5510 KM_IRQ1,
5511 KM_SOFTIRQ0,
5512 KM_SOFTIRQ1,
5513+ KM_CLEARPAGE,
5514 KM_TYPE_NR
5515 };
5516
5517diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5518index 7cfc3ce..cbd1a58 100644
5519--- a/arch/um/include/asm/page.h
5520+++ b/arch/um/include/asm/page.h
5521@@ -14,6 +14,9 @@
5522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5523 #define PAGE_MASK (~(PAGE_SIZE-1))
5524
5525+#define ktla_ktva(addr) (addr)
5526+#define ktva_ktla(addr) (addr)
5527+
5528 #ifndef __ASSEMBLY__
5529
5530 struct page;
5531diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5532index c533835..84db18e 100644
5533--- a/arch/um/kernel/process.c
5534+++ b/arch/um/kernel/process.c
5535@@ -406,22 +406,6 @@ int singlestepping(void * t)
5536 return 2;
5537 }
5538
5539-/*
5540- * Only x86 and x86_64 have an arch_align_stack().
5541- * All other arches have "#define arch_align_stack(x) (x)"
5542- * in their asm/system.h
5543- * As this is included in UML from asm-um/system-generic.h,
5544- * we can use it to behave as the subarch does.
5545- */
5546-#ifndef arch_align_stack
5547-unsigned long arch_align_stack(unsigned long sp)
5548-{
5549- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5550- sp -= get_random_int() % 8192;
5551- return sp & ~0xf;
5552-}
5553-#endif
5554-
5555 unsigned long get_wchan(struct task_struct *p)
5556 {
5557 unsigned long stack_page, sp, ip;
5558diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5559index efb4294..61bc18c 100644
5560--- a/arch/x86/Kconfig
5561+++ b/arch/x86/Kconfig
5562@@ -235,7 +235,7 @@ config X86_HT
5563
5564 config X86_32_LAZY_GS
5565 def_bool y
5566- depends on X86_32 && !CC_STACKPROTECTOR
5567+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5568
5569 config ARCH_HWEIGHT_CFLAGS
5570 string
5571@@ -1022,7 +1022,7 @@ choice
5572
5573 config NOHIGHMEM
5574 bool "off"
5575- depends on !X86_NUMAQ
5576+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5577 ---help---
5578 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5579 However, the address space of 32-bit x86 processors is only 4
5580@@ -1059,7 +1059,7 @@ config NOHIGHMEM
5581
5582 config HIGHMEM4G
5583 bool "4GB"
5584- depends on !X86_NUMAQ
5585+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5586 ---help---
5587 Select this if you have a 32-bit processor and between 1 and 4
5588 gigabytes of physical RAM.
5589@@ -1113,7 +1113,7 @@ config PAGE_OFFSET
5590 hex
5591 default 0xB0000000 if VMSPLIT_3G_OPT
5592 default 0x80000000 if VMSPLIT_2G
5593- default 0x78000000 if VMSPLIT_2G_OPT
5594+ default 0x70000000 if VMSPLIT_2G_OPT
5595 default 0x40000000 if VMSPLIT_1G
5596 default 0xC0000000
5597 depends on X86_32
5598@@ -1496,6 +1496,7 @@ config SECCOMP
5599
5600 config CC_STACKPROTECTOR
5601 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5602+ depends on X86_64 || !PAX_MEMORY_UDEREF
5603 ---help---
5604 This option turns on the -fstack-protector GCC feature. This
5605 feature puts, at the beginning of functions, a canary value on
5606@@ -1553,6 +1554,7 @@ config KEXEC_JUMP
5607 config PHYSICAL_START
5608 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5609 default "0x1000000"
5610+ range 0x400000 0x40000000
5611 ---help---
5612 This gives the physical address where the kernel is loaded.
5613
5614@@ -1616,6 +1618,7 @@ config X86_NEED_RELOCS
5615 config PHYSICAL_ALIGN
5616 hex "Alignment value to which kernel should be aligned" if X86_32
5617 default "0x1000000"
5618+ range 0x400000 0x1000000 if PAX_KERNEXEC
5619 range 0x2000 0x1000000
5620 ---help---
5621 This value puts the alignment restrictions on physical address
5622@@ -1647,9 +1650,10 @@ config HOTPLUG_CPU
5623 Say N if you want to disable CPU hotplug.
5624
5625 config COMPAT_VDSO
5626- def_bool y
5627+ def_bool n
5628 prompt "Compat VDSO support"
5629 depends on X86_32 || IA32_EMULATION
5630+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5631 ---help---
5632 Map the 32-bit VDSO to the predictable old-style address too.
5633
5634diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5635index e3ca7e0..b30b28a 100644
5636--- a/arch/x86/Kconfig.cpu
5637+++ b/arch/x86/Kconfig.cpu
5638@@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5639
5640 config X86_F00F_BUG
5641 def_bool y
5642- depends on M586MMX || M586TSC || M586 || M486 || M386
5643+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5644
5645 config X86_INVD_BUG
5646 def_bool y
5647@@ -365,7 +365,7 @@ config X86_POPAD_OK
5648
5649 config X86_ALIGNMENT_16
5650 def_bool y
5651- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5652+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5653
5654 config X86_INTEL_USERCOPY
5655 def_bool y
5656@@ -411,7 +411,7 @@ config X86_CMPXCHG64
5657 # generates cmov.
5658 config X86_CMOV
5659 def_bool y
5660- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5661+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5662
5663 config X86_MINIMUM_CPU_FAMILY
5664 int
5665diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5666index bf56e17..05f9891 100644
5667--- a/arch/x86/Kconfig.debug
5668+++ b/arch/x86/Kconfig.debug
5669@@ -81,7 +81,7 @@ config X86_PTDUMP
5670 config DEBUG_RODATA
5671 bool "Write protect kernel read-only data structures"
5672 default y
5673- depends on DEBUG_KERNEL
5674+ depends on DEBUG_KERNEL && BROKEN
5675 ---help---
5676 Mark the kernel read-only data as write-protected in the pagetables,
5677 in order to catch accidental (and incorrect) writes to such const
5678@@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5679
5680 config DEBUG_SET_MODULE_RONX
5681 bool "Set loadable kernel module data as NX and text as RO"
5682- depends on MODULES
5683+ depends on MODULES && BROKEN
5684 ---help---
5685 This option helps catch unintended modifications to loadable
5686 kernel module's text and read-only data. It also prevents execution
5687diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5688index b02e509..2631e48 100644
5689--- a/arch/x86/Makefile
5690+++ b/arch/x86/Makefile
5691@@ -46,6 +46,7 @@ else
5692 UTS_MACHINE := x86_64
5693 CHECKFLAGS += -D__x86_64__ -m64
5694
5695+ biarch := $(call cc-option,-m64)
5696 KBUILD_AFLAGS += -m64
5697 KBUILD_CFLAGS += -m64
5698
5699@@ -195,3 +196,12 @@ define archhelp
5700 echo ' FDARGS="..." arguments for the booted kernel'
5701 echo ' FDINITRD=file initrd for the booted kernel'
5702 endef
5703+
5704+define OLD_LD
5705+
5706+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5707+*** Please upgrade your binutils to 2.18 or newer
5708+endef
5709+
5710+archprepare:
5711+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5712diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5713index 95365a8..52f857b 100644
5714--- a/arch/x86/boot/Makefile
5715+++ b/arch/x86/boot/Makefile
5716@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5717 $(call cc-option, -fno-stack-protector) \
5718 $(call cc-option, -mpreferred-stack-boundary=2)
5719 KBUILD_CFLAGS += $(call cc-option, -m32)
5720+ifdef CONSTIFY_PLUGIN
5721+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5722+endif
5723 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5724 GCOV_PROFILE := n
5725
5726diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5727index 878e4b9..20537ab 100644
5728--- a/arch/x86/boot/bitops.h
5729+++ b/arch/x86/boot/bitops.h
5730@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5731 u8 v;
5732 const u32 *p = (const u32 *)addr;
5733
5734- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5735+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5736 return v;
5737 }
5738
5739@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5740
5741 static inline void set_bit(int nr, void *addr)
5742 {
5743- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5744+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5745 }
5746
5747 #endif /* BOOT_BITOPS_H */
5748diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5749index c7093bd..d4247ffe0 100644
5750--- a/arch/x86/boot/boot.h
5751+++ b/arch/x86/boot/boot.h
5752@@ -85,7 +85,7 @@ static inline void io_delay(void)
5753 static inline u16 ds(void)
5754 {
5755 u16 seg;
5756- asm("movw %%ds,%0" : "=rm" (seg));
5757+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5758 return seg;
5759 }
5760
5761@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5762 static inline int memcmp(const void *s1, const void *s2, size_t len)
5763 {
5764 u8 diff;
5765- asm("repe; cmpsb; setnz %0"
5766+ asm volatile("repe; cmpsb; setnz %0"
5767 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5768 return diff;
5769 }
5770diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5771index 09664ef..edc5d03 100644
5772--- a/arch/x86/boot/compressed/Makefile
5773+++ b/arch/x86/boot/compressed/Makefile
5774@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5775 KBUILD_CFLAGS += $(cflags-y)
5776 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5777 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5778+ifdef CONSTIFY_PLUGIN
5779+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5780+endif
5781
5782 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5783 GCOV_PROFILE := n
5784diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5785index 67a655a..b924059 100644
5786--- a/arch/x86/boot/compressed/head_32.S
5787+++ b/arch/x86/boot/compressed/head_32.S
5788@@ -76,7 +76,7 @@ ENTRY(startup_32)
5789 notl %eax
5790 andl %eax, %ebx
5791 #else
5792- movl $LOAD_PHYSICAL_ADDR, %ebx
5793+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5794 #endif
5795
5796 /* Target address to relocate to for decompression */
5797@@ -162,7 +162,7 @@ relocated:
5798 * and where it was actually loaded.
5799 */
5800 movl %ebp, %ebx
5801- subl $LOAD_PHYSICAL_ADDR, %ebx
5802+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5803 jz 2f /* Nothing to be done if loaded at compiled addr. */
5804 /*
5805 * Process relocations.
5806@@ -170,8 +170,7 @@ relocated:
5807
5808 1: subl $4, %edi
5809 movl (%edi), %ecx
5810- testl %ecx, %ecx
5811- jz 2f
5812+ jecxz 2f
5813 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5814 jmp 1b
5815 2:
5816diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5817index 35af09d..99c9676 100644
5818--- a/arch/x86/boot/compressed/head_64.S
5819+++ b/arch/x86/boot/compressed/head_64.S
5820@@ -91,7 +91,7 @@ ENTRY(startup_32)
5821 notl %eax
5822 andl %eax, %ebx
5823 #else
5824- movl $LOAD_PHYSICAL_ADDR, %ebx
5825+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5826 #endif
5827
5828 /* Target address to relocate to for decompression */
5829@@ -233,7 +233,7 @@ ENTRY(startup_64)
5830 notq %rax
5831 andq %rax, %rbp
5832 #else
5833- movq $LOAD_PHYSICAL_ADDR, %rbp
5834+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5835 #endif
5836
5837 /* Target address to relocate to for decompression */
5838diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5839index 3a19d04..7c1d55a 100644
5840--- a/arch/x86/boot/compressed/misc.c
5841+++ b/arch/x86/boot/compressed/misc.c
5842@@ -310,7 +310,7 @@ static void parse_elf(void *output)
5843 case PT_LOAD:
5844 #ifdef CONFIG_RELOCATABLE
5845 dest = output;
5846- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5847+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5848 #else
5849 dest = (void *)(phdr->p_paddr);
5850 #endif
5851@@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5852 error("Destination address too large");
5853 #endif
5854 #ifndef CONFIG_RELOCATABLE
5855- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5856+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5857 error("Wrong destination address");
5858 #endif
5859
5860diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5861index 89bbf4e..869908e 100644
5862--- a/arch/x86/boot/compressed/relocs.c
5863+++ b/arch/x86/boot/compressed/relocs.c
5864@@ -13,8 +13,11 @@
5865
5866 static void die(char *fmt, ...);
5867
5868+#include "../../../../include/generated/autoconf.h"
5869+
5870 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5871 static Elf32_Ehdr ehdr;
5872+static Elf32_Phdr *phdr;
5873 static unsigned long reloc_count, reloc_idx;
5874 static unsigned long *relocs;
5875
5876@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5877 }
5878 }
5879
5880+static void read_phdrs(FILE *fp)
5881+{
5882+ unsigned int i;
5883+
5884+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5885+ if (!phdr) {
5886+ die("Unable to allocate %d program headers\n",
5887+ ehdr.e_phnum);
5888+ }
5889+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5890+ die("Seek to %d failed: %s\n",
5891+ ehdr.e_phoff, strerror(errno));
5892+ }
5893+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5894+ die("Cannot read ELF program headers: %s\n",
5895+ strerror(errno));
5896+ }
5897+ for(i = 0; i < ehdr.e_phnum; i++) {
5898+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5899+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5900+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5901+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5902+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5903+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5904+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5905+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5906+ }
5907+
5908+}
5909+
5910 static void read_shdrs(FILE *fp)
5911 {
5912- int i;
5913+ unsigned int i;
5914 Elf32_Shdr shdr;
5915
5916 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5917@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5918
5919 static void read_strtabs(FILE *fp)
5920 {
5921- int i;
5922+ unsigned int i;
5923 for (i = 0; i < ehdr.e_shnum; i++) {
5924 struct section *sec = &secs[i];
5925 if (sec->shdr.sh_type != SHT_STRTAB) {
5926@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5927
5928 static void read_symtabs(FILE *fp)
5929 {
5930- int i,j;
5931+ unsigned int i,j;
5932 for (i = 0; i < ehdr.e_shnum; i++) {
5933 struct section *sec = &secs[i];
5934 if (sec->shdr.sh_type != SHT_SYMTAB) {
5935@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5936
5937 static void read_relocs(FILE *fp)
5938 {
5939- int i,j;
5940+ unsigned int i,j;
5941+ uint32_t base;
5942+
5943 for (i = 0; i < ehdr.e_shnum; i++) {
5944 struct section *sec = &secs[i];
5945 if (sec->shdr.sh_type != SHT_REL) {
5946@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5947 die("Cannot read symbol table: %s\n",
5948 strerror(errno));
5949 }
5950+ base = 0;
5951+ for (j = 0; j < ehdr.e_phnum; j++) {
5952+ if (phdr[j].p_type != PT_LOAD )
5953+ continue;
5954+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5955+ continue;
5956+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5957+ break;
5958+ }
5959 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5960 Elf32_Rel *rel = &sec->reltab[j];
5961- rel->r_offset = elf32_to_cpu(rel->r_offset);
5962+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5963 rel->r_info = elf32_to_cpu(rel->r_info);
5964 }
5965 }
5966@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5967
5968 static void print_absolute_symbols(void)
5969 {
5970- int i;
5971+ unsigned int i;
5972 printf("Absolute symbols\n");
5973 printf(" Num: Value Size Type Bind Visibility Name\n");
5974 for (i = 0; i < ehdr.e_shnum; i++) {
5975 struct section *sec = &secs[i];
5976 char *sym_strtab;
5977 Elf32_Sym *sh_symtab;
5978- int j;
5979+ unsigned int j;
5980
5981 if (sec->shdr.sh_type != SHT_SYMTAB) {
5982 continue;
5983@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5984
5985 static void print_absolute_relocs(void)
5986 {
5987- int i, printed = 0;
5988+ unsigned int i, printed = 0;
5989
5990 for (i = 0; i < ehdr.e_shnum; i++) {
5991 struct section *sec = &secs[i];
5992 struct section *sec_applies, *sec_symtab;
5993 char *sym_strtab;
5994 Elf32_Sym *sh_symtab;
5995- int j;
5996+ unsigned int j;
5997 if (sec->shdr.sh_type != SHT_REL) {
5998 continue;
5999 }
6000@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6001
6002 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6003 {
6004- int i;
6005+ unsigned int i;
6006 /* Walk through the relocations */
6007 for (i = 0; i < ehdr.e_shnum; i++) {
6008 char *sym_strtab;
6009 Elf32_Sym *sh_symtab;
6010 struct section *sec_applies, *sec_symtab;
6011- int j;
6012+ unsigned int j;
6013 struct section *sec = &secs[i];
6014
6015 if (sec->shdr.sh_type != SHT_REL) {
6016@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6017 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6018 continue;
6019 }
6020+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6021+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6022+ continue;
6023+
6024+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6025+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6026+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6027+ continue;
6028+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6029+ continue;
6030+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6031+ continue;
6032+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6033+ continue;
6034+#endif
6035+
6036 switch (r_type) {
6037 case R_386_NONE:
6038 case R_386_PC32:
6039@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6040
6041 static void emit_relocs(int as_text)
6042 {
6043- int i;
6044+ unsigned int i;
6045 /* Count how many relocations I have and allocate space for them. */
6046 reloc_count = 0;
6047 walk_relocs(count_reloc);
6048@@ -665,6 +725,7 @@ int main(int argc, char **argv)
6049 fname, strerror(errno));
6050 }
6051 read_ehdr(fp);
6052+ read_phdrs(fp);
6053 read_shdrs(fp);
6054 read_strtabs(fp);
6055 read_symtabs(fp);
6056diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6057index 4d3ff03..e4972ff 100644
6058--- a/arch/x86/boot/cpucheck.c
6059+++ b/arch/x86/boot/cpucheck.c
6060@@ -74,7 +74,7 @@ static int has_fpu(void)
6061 u16 fcw = -1, fsw = -1;
6062 u32 cr0;
6063
6064- asm("movl %%cr0,%0" : "=r" (cr0));
6065+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6066 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6067 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6068 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6069@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6070 {
6071 u32 f0, f1;
6072
6073- asm("pushfl ; "
6074+ asm volatile("pushfl ; "
6075 "pushfl ; "
6076 "popl %0 ; "
6077 "movl %0,%1 ; "
6078@@ -115,7 +115,7 @@ static void get_flags(void)
6079 set_bit(X86_FEATURE_FPU, cpu.flags);
6080
6081 if (has_eflag(X86_EFLAGS_ID)) {
6082- asm("cpuid"
6083+ asm volatile("cpuid"
6084 : "=a" (max_intel_level),
6085 "=b" (cpu_vendor[0]),
6086 "=d" (cpu_vendor[1]),
6087@@ -124,7 +124,7 @@ static void get_flags(void)
6088
6089 if (max_intel_level >= 0x00000001 &&
6090 max_intel_level <= 0x0000ffff) {
6091- asm("cpuid"
6092+ asm volatile("cpuid"
6093 : "=a" (tfms),
6094 "=c" (cpu.flags[4]),
6095 "=d" (cpu.flags[0])
6096@@ -136,7 +136,7 @@ static void get_flags(void)
6097 cpu.model += ((tfms >> 16) & 0xf) << 4;
6098 }
6099
6100- asm("cpuid"
6101+ asm volatile("cpuid"
6102 : "=a" (max_amd_level)
6103 : "a" (0x80000000)
6104 : "ebx", "ecx", "edx");
6105@@ -144,7 +144,7 @@ static void get_flags(void)
6106 if (max_amd_level >= 0x80000001 &&
6107 max_amd_level <= 0x8000ffff) {
6108 u32 eax = 0x80000001;
6109- asm("cpuid"
6110+ asm volatile("cpuid"
6111 : "+a" (eax),
6112 "=c" (cpu.flags[6]),
6113 "=d" (cpu.flags[1])
6114@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6115 u32 ecx = MSR_K7_HWCR;
6116 u32 eax, edx;
6117
6118- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6119+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6120 eax &= ~(1 << 15);
6121- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6122+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6123
6124 get_flags(); /* Make sure it really did something */
6125 err = check_flags();
6126@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6127 u32 ecx = MSR_VIA_FCR;
6128 u32 eax, edx;
6129
6130- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6131+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6132 eax |= (1<<1)|(1<<7);
6133- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6134+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6135
6136 set_bit(X86_FEATURE_CX8, cpu.flags);
6137 err = check_flags();
6138@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6139 u32 eax, edx;
6140 u32 level = 1;
6141
6142- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6143- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6144- asm("cpuid"
6145+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6146+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6147+ asm volatile("cpuid"
6148 : "+a" (level), "=d" (cpu.flags[0])
6149 : : "ecx", "ebx");
6150- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6151+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6152
6153 err = check_flags();
6154 }
6155diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6156index bdb4d45..0476680 100644
6157--- a/arch/x86/boot/header.S
6158+++ b/arch/x86/boot/header.S
6159@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6160 # single linked list of
6161 # struct setup_data
6162
6163-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6164+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6165
6166 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6167 #define VO_INIT_SIZE (VO__end - VO__text)
6168diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6169index db75d07..8e6d0af 100644
6170--- a/arch/x86/boot/memory.c
6171+++ b/arch/x86/boot/memory.c
6172@@ -19,7 +19,7 @@
6173
6174 static int detect_memory_e820(void)
6175 {
6176- int count = 0;
6177+ unsigned int count = 0;
6178 struct biosregs ireg, oreg;
6179 struct e820entry *desc = boot_params.e820_map;
6180 static struct e820entry buf; /* static so it is zeroed */
6181diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6182index 11e8c6e..fdbb1ed 100644
6183--- a/arch/x86/boot/video-vesa.c
6184+++ b/arch/x86/boot/video-vesa.c
6185@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6186
6187 boot_params.screen_info.vesapm_seg = oreg.es;
6188 boot_params.screen_info.vesapm_off = oreg.di;
6189+ boot_params.screen_info.vesapm_size = oreg.cx;
6190 }
6191
6192 /*
6193diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6194index 43eda28..5ab5fdb 100644
6195--- a/arch/x86/boot/video.c
6196+++ b/arch/x86/boot/video.c
6197@@ -96,7 +96,7 @@ static void store_mode_params(void)
6198 static unsigned int get_entry(void)
6199 {
6200 char entry_buf[4];
6201- int i, len = 0;
6202+ unsigned int i, len = 0;
6203 int key;
6204 unsigned int v;
6205
6206diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6207index 5b577d5..3c1fed4 100644
6208--- a/arch/x86/crypto/aes-x86_64-asm_64.S
6209+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6210@@ -8,6 +8,8 @@
6211 * including this sentence is retained in full.
6212 */
6213
6214+#include <asm/alternative-asm.h>
6215+
6216 .extern crypto_ft_tab
6217 .extern crypto_it_tab
6218 .extern crypto_fl_tab
6219@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6220 je B192; \
6221 leaq 32(r9),r9;
6222
6223+#define ret pax_force_retaddr 0, 1; ret
6224+
6225 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6226 movq r1,r2; \
6227 movq r3,r4; \
6228diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6229index be6d9e3..21fbbca 100644
6230--- a/arch/x86/crypto/aesni-intel_asm.S
6231+++ b/arch/x86/crypto/aesni-intel_asm.S
6232@@ -31,6 +31,7 @@
6233
6234 #include <linux/linkage.h>
6235 #include <asm/inst.h>
6236+#include <asm/alternative-asm.h>
6237
6238 #ifdef __x86_64__
6239 .data
6240@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6241 pop %r14
6242 pop %r13
6243 pop %r12
6244+ pax_force_retaddr 0, 1
6245 ret
6246+ENDPROC(aesni_gcm_dec)
6247
6248
6249 /*****************************************************************************
6250@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6251 pop %r14
6252 pop %r13
6253 pop %r12
6254+ pax_force_retaddr 0, 1
6255 ret
6256+ENDPROC(aesni_gcm_enc)
6257
6258 #endif
6259
6260@@ -1714,6 +1719,7 @@ _key_expansion_256a:
6261 pxor %xmm1, %xmm0
6262 movaps %xmm0, (TKEYP)
6263 add $0x10, TKEYP
6264+ pax_force_retaddr_bts
6265 ret
6266
6267 .align 4
6268@@ -1738,6 +1744,7 @@ _key_expansion_192a:
6269 shufps $0b01001110, %xmm2, %xmm1
6270 movaps %xmm1, 0x10(TKEYP)
6271 add $0x20, TKEYP
6272+ pax_force_retaddr_bts
6273 ret
6274
6275 .align 4
6276@@ -1757,6 +1764,7 @@ _key_expansion_192b:
6277
6278 movaps %xmm0, (TKEYP)
6279 add $0x10, TKEYP
6280+ pax_force_retaddr_bts
6281 ret
6282
6283 .align 4
6284@@ -1769,6 +1777,7 @@ _key_expansion_256b:
6285 pxor %xmm1, %xmm2
6286 movaps %xmm2, (TKEYP)
6287 add $0x10, TKEYP
6288+ pax_force_retaddr_bts
6289 ret
6290
6291 /*
6292@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6293 #ifndef __x86_64__
6294 popl KEYP
6295 #endif
6296+ pax_force_retaddr 0, 1
6297 ret
6298+ENDPROC(aesni_set_key)
6299
6300 /*
6301 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6302@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6303 popl KLEN
6304 popl KEYP
6305 #endif
6306+ pax_force_retaddr 0, 1
6307 ret
6308+ENDPROC(aesni_enc)
6309
6310 /*
6311 * _aesni_enc1: internal ABI
6312@@ -1959,6 +1972,7 @@ _aesni_enc1:
6313 AESENC KEY STATE
6314 movaps 0x70(TKEYP), KEY
6315 AESENCLAST KEY STATE
6316+ pax_force_retaddr_bts
6317 ret
6318
6319 /*
6320@@ -2067,6 +2081,7 @@ _aesni_enc4:
6321 AESENCLAST KEY STATE2
6322 AESENCLAST KEY STATE3
6323 AESENCLAST KEY STATE4
6324+ pax_force_retaddr_bts
6325 ret
6326
6327 /*
6328@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6329 popl KLEN
6330 popl KEYP
6331 #endif
6332+ pax_force_retaddr 0, 1
6333 ret
6334+ENDPROC(aesni_dec)
6335
6336 /*
6337 * _aesni_dec1: internal ABI
6338@@ -2146,6 +2163,7 @@ _aesni_dec1:
6339 AESDEC KEY STATE
6340 movaps 0x70(TKEYP), KEY
6341 AESDECLAST KEY STATE
6342+ pax_force_retaddr_bts
6343 ret
6344
6345 /*
6346@@ -2254,6 +2272,7 @@ _aesni_dec4:
6347 AESDECLAST KEY STATE2
6348 AESDECLAST KEY STATE3
6349 AESDECLAST KEY STATE4
6350+ pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6355 popl KEYP
6356 popl LEN
6357 #endif
6358+ pax_force_retaddr 0, 1
6359 ret
6360+ENDPROC(aesni_ecb_enc)
6361
6362 /*
6363 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6364@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6365 popl KEYP
6366 popl LEN
6367 #endif
6368+ pax_force_retaddr 0, 1
6369 ret
6370+ENDPROC(aesni_ecb_dec)
6371
6372 /*
6373 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6374@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6375 popl LEN
6376 popl IVP
6377 #endif
6378+ pax_force_retaddr 0, 1
6379 ret
6380+ENDPROC(aesni_cbc_enc)
6381
6382 /*
6383 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6384@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6385 popl LEN
6386 popl IVP
6387 #endif
6388+ pax_force_retaddr 0, 1
6389 ret
6390+ENDPROC(aesni_cbc_dec)
6391
6392 #ifdef __x86_64__
6393 .align 16
6394@@ -2524,6 +2551,7 @@ _aesni_inc_init:
6395 mov $1, TCTR_LOW
6396 MOVQ_R64_XMM TCTR_LOW INC
6397 MOVQ_R64_XMM CTR TCTR_LOW
6398+ pax_force_retaddr_bts
6399 ret
6400
6401 /*
6402@@ -2552,6 +2580,7 @@ _aesni_inc:
6403 .Linc_low:
6404 movaps CTR, IV
6405 PSHUFB_XMM BSWAP_MASK IV
6406+ pax_force_retaddr_bts
6407 ret
6408
6409 /*
6410@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6411 .Lctr_enc_ret:
6412 movups IV, (IVP)
6413 .Lctr_enc_just_ret:
6414+ pax_force_retaddr 0, 1
6415 ret
6416+ENDPROC(aesni_ctr_enc)
6417 #endif
6418diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6419index 391d245..67f35c2 100644
6420--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
6421+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
6422@@ -20,6 +20,8 @@
6423 *
6424 */
6425
6426+#include <asm/alternative-asm.h>
6427+
6428 .file "blowfish-x86_64-asm.S"
6429 .text
6430
6431@@ -151,9 +153,11 @@ __blowfish_enc_blk:
6432 jnz __enc_xor;
6433
6434 write_block();
6435+ pax_force_retaddr 0, 1
6436 ret;
6437 __enc_xor:
6438 xor_block();
6439+ pax_force_retaddr 0, 1
6440 ret;
6441
6442 .align 8
6443@@ -188,6 +192,7 @@ blowfish_dec_blk:
6444
6445 movq %r11, %rbp;
6446
6447+ pax_force_retaddr 0, 1
6448 ret;
6449
6450 /**********************************************************************
6451@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
6452
6453 popq %rbx;
6454 popq %rbp;
6455+ pax_force_retaddr 0, 1
6456 ret;
6457
6458 __enc_xor4:
6459@@ -349,6 +355,7 @@ __enc_xor4:
6460
6461 popq %rbx;
6462 popq %rbp;
6463+ pax_force_retaddr 0, 1
6464 ret;
6465
6466 .align 8
6467@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
6468 popq %rbx;
6469 popq %rbp;
6470
6471+ pax_force_retaddr 0, 1
6472 ret;
6473
6474diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6475index 6214a9b..1f4fc9a 100644
6476--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6477+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6478@@ -1,3 +1,5 @@
6479+#include <asm/alternative-asm.h>
6480+
6481 # enter ECRYPT_encrypt_bytes
6482 .text
6483 .p2align 5
6484@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6485 add %r11,%rsp
6486 mov %rdi,%rax
6487 mov %rsi,%rdx
6488+ pax_force_retaddr 0, 1
6489 ret
6490 # bytesatleast65:
6491 ._bytesatleast65:
6492@@ -891,6 +894,7 @@ ECRYPT_keysetup:
6493 add %r11,%rsp
6494 mov %rdi,%rax
6495 mov %rsi,%rdx
6496+ pax_force_retaddr
6497 ret
6498 # enter ECRYPT_ivsetup
6499 .text
6500@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6501 add %r11,%rsp
6502 mov %rdi,%rax
6503 mov %rsi,%rdx
6504+ pax_force_retaddr
6505 ret
6506diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
6507index b2c2f57..8470cab 100644
6508--- a/arch/x86/crypto/sha1_ssse3_asm.S
6509+++ b/arch/x86/crypto/sha1_ssse3_asm.S
6510@@ -28,6 +28,8 @@
6511 * (at your option) any later version.
6512 */
6513
6514+#include <asm/alternative-asm.h>
6515+
6516 #define CTX %rdi // arg1
6517 #define BUF %rsi // arg2
6518 #define CNT %rdx // arg3
6519@@ -104,6 +106,7 @@
6520 pop %r12
6521 pop %rbp
6522 pop %rbx
6523+ pax_force_retaddr 0, 1
6524 ret
6525
6526 .size \name, .-\name
6527diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6528index 5b012a2..36d5364 100644
6529--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6530+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
6531@@ -20,6 +20,8 @@
6532 *
6533 */
6534
6535+#include <asm/alternative-asm.h>
6536+
6537 .file "twofish-x86_64-asm-3way.S"
6538 .text
6539
6540@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
6541 popq %r13;
6542 popq %r14;
6543 popq %r15;
6544+ pax_force_retaddr 0, 1
6545 ret;
6546
6547 __enc_xor3:
6548@@ -271,6 +274,7 @@ __enc_xor3:
6549 popq %r13;
6550 popq %r14;
6551 popq %r15;
6552+ pax_force_retaddr 0, 1
6553 ret;
6554
6555 .global twofish_dec_blk_3way
6556@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
6557 popq %r13;
6558 popq %r14;
6559 popq %r15;
6560+ pax_force_retaddr 0, 1
6561 ret;
6562
6563diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6564index 7bcf3fc..f53832f 100644
6565--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6566+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6567@@ -21,6 +21,7 @@
6568 .text
6569
6570 #include <asm/asm-offsets.h>
6571+#include <asm/alternative-asm.h>
6572
6573 #define a_offset 0
6574 #define b_offset 4
6575@@ -268,6 +269,7 @@ twofish_enc_blk:
6576
6577 popq R1
6578 movq $1,%rax
6579+ pax_force_retaddr 0, 1
6580 ret
6581
6582 twofish_dec_blk:
6583@@ -319,4 +321,5 @@ twofish_dec_blk:
6584
6585 popq R1
6586 movq $1,%rax
6587+ pax_force_retaddr 0, 1
6588 ret
6589diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6590index fd84387..0b4af7d 100644
6591--- a/arch/x86/ia32/ia32_aout.c
6592+++ b/arch/x86/ia32/ia32_aout.c
6593@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6594 unsigned long dump_start, dump_size;
6595 struct user32 dump;
6596
6597+ memset(&dump, 0, sizeof(dump));
6598+
6599 fs = get_fs();
6600 set_fs(KERNEL_DS);
6601 has_dumped = 1;
6602diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6603index 6557769..ef6ae89 100644
6604--- a/arch/x86/ia32/ia32_signal.c
6605+++ b/arch/x86/ia32/ia32_signal.c
6606@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6607 }
6608 seg = get_fs();
6609 set_fs(KERNEL_DS);
6610- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6611+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6612 set_fs(seg);
6613 if (ret >= 0 && uoss_ptr) {
6614 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6615@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6616 */
6617 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6618 size_t frame_size,
6619- void **fpstate)
6620+ void __user **fpstate)
6621 {
6622 unsigned long sp;
6623
6624@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6625
6626 if (used_math()) {
6627 sp = sp - sig_xstate_ia32_size;
6628- *fpstate = (struct _fpstate_ia32 *) sp;
6629+ *fpstate = (struct _fpstate_ia32 __user *) sp;
6630 if (save_i387_xstate_ia32(*fpstate) < 0)
6631 return (void __user *) -1L;
6632 }
6633@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6634 sp -= frame_size;
6635 /* Align the stack pointer according to the i386 ABI,
6636 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6637- sp = ((sp + 4) & -16ul) - 4;
6638+ sp = ((sp - 12) & -16ul) - 4;
6639 return (void __user *) sp;
6640 }
6641
6642@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6643 * These are actually not used anymore, but left because some
6644 * gdb versions depend on them as a marker.
6645 */
6646- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6647+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6648 } put_user_catch(err);
6649
6650 if (err)
6651@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6652 0xb8,
6653 __NR_ia32_rt_sigreturn,
6654 0x80cd,
6655- 0,
6656+ 0
6657 };
6658
6659 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6660@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6661
6662 if (ka->sa.sa_flags & SA_RESTORER)
6663 restorer = ka->sa.sa_restorer;
6664+ else if (current->mm->context.vdso)
6665+ /* Return stub is in 32bit vsyscall page */
6666+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6667 else
6668- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6669- rt_sigreturn);
6670+ restorer = &frame->retcode;
6671 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6672
6673 /*
6674 * Not actually used anymore, but left because some gdb
6675 * versions need it.
6676 */
6677- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6678+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6679 } put_user_catch(err);
6680
6681 if (err)
6682diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6683index a6253ec..4ad2120 100644
6684--- a/arch/x86/ia32/ia32entry.S
6685+++ b/arch/x86/ia32/ia32entry.S
6686@@ -13,7 +13,9 @@
6687 #include <asm/thread_info.h>
6688 #include <asm/segment.h>
6689 #include <asm/irqflags.h>
6690+#include <asm/pgtable.h>
6691 #include <linux/linkage.h>
6692+#include <asm/alternative-asm.h>
6693
6694 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6695 #include <linux/elf-em.h>
6696@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6697 ENDPROC(native_irq_enable_sysexit)
6698 #endif
6699
6700+ .macro pax_enter_kernel_user
6701+ pax_set_fptr_mask
6702+#ifdef CONFIG_PAX_MEMORY_UDEREF
6703+ call pax_enter_kernel_user
6704+#endif
6705+ .endm
6706+
6707+ .macro pax_exit_kernel_user
6708+#ifdef CONFIG_PAX_MEMORY_UDEREF
6709+ call pax_exit_kernel_user
6710+#endif
6711+#ifdef CONFIG_PAX_RANDKSTACK
6712+ pushq %rax
6713+ pushq %r11
6714+ call pax_randomize_kstack
6715+ popq %r11
6716+ popq %rax
6717+#endif
6718+ .endm
6719+
6720+.macro pax_erase_kstack
6721+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6722+ call pax_erase_kstack
6723+#endif
6724+.endm
6725+
6726 /*
6727 * 32bit SYSENTER instruction entry.
6728 *
6729@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6730 CFI_REGISTER rsp,rbp
6731 SWAPGS_UNSAFE_STACK
6732 movq PER_CPU_VAR(kernel_stack), %rsp
6733- addq $(KERNEL_STACK_OFFSET),%rsp
6734- /*
6735- * No need to follow this irqs on/off section: the syscall
6736- * disabled irqs, here we enable it straight after entry:
6737- */
6738- ENABLE_INTERRUPTS(CLBR_NONE)
6739 movl %ebp,%ebp /* zero extension */
6740 pushq_cfi $__USER32_DS
6741 /*CFI_REL_OFFSET ss,0*/
6742@@ -134,25 +156,39 @@ ENTRY(ia32_sysenter_target)
6743 CFI_REL_OFFSET rsp,0
6744 pushfq_cfi
6745 /*CFI_REL_OFFSET rflags,0*/
6746- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6747- CFI_REGISTER rip,r10
6748+ orl $X86_EFLAGS_IF,(%rsp)
6749+ GET_THREAD_INFO(%r11)
6750+ movl TI_sysenter_return(%r11), %r11d
6751+ CFI_REGISTER rip,r11
6752 pushq_cfi $__USER32_CS
6753 /*CFI_REL_OFFSET cs,0*/
6754 movl %eax, %eax
6755- pushq_cfi %r10
6756+ pushq_cfi %r11
6757 CFI_REL_OFFSET rip,0
6758 pushq_cfi %rax
6759 cld
6760 SAVE_ARGS 0,1,0
6761+ pax_enter_kernel_user
6762+ /*
6763+ * No need to follow this irqs on/off section: the syscall
6764+ * disabled irqs, here we enable it straight after entry:
6765+ */
6766+ ENABLE_INTERRUPTS(CLBR_NONE)
6767 /* no need to do an access_ok check here because rbp has been
6768 32bit zero extended */
6769+
6770+#ifdef CONFIG_PAX_MEMORY_UDEREF
6771+ mov $PAX_USER_SHADOW_BASE,%r11
6772+ add %r11,%rbp
6773+#endif
6774+
6775 1: movl (%rbp),%ebp
6776 .section __ex_table,"a"
6777 .quad 1b,ia32_badarg
6778 .previous
6779- GET_THREAD_INFO(%r10)
6780- orl $TS_COMPAT,TI_status(%r10)
6781- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6782+ GET_THREAD_INFO(%r11)
6783+ orl $TS_COMPAT,TI_status(%r11)
6784+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6785 CFI_REMEMBER_STATE
6786 jnz sysenter_tracesys
6787 cmpq $(IA32_NR_syscalls-1),%rax
6788@@ -162,13 +198,15 @@ sysenter_do_call:
6789 sysenter_dispatch:
6790 call *ia32_sys_call_table(,%rax,8)
6791 movq %rax,RAX-ARGOFFSET(%rsp)
6792- GET_THREAD_INFO(%r10)
6793+ GET_THREAD_INFO(%r11)
6794 DISABLE_INTERRUPTS(CLBR_NONE)
6795 TRACE_IRQS_OFF
6796- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6797+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6798 jnz sysexit_audit
6799 sysexit_from_sys_call:
6800- andl $~TS_COMPAT,TI_status(%r10)
6801+ pax_exit_kernel_user
6802+ pax_erase_kstack
6803+ andl $~TS_COMPAT,TI_status(%r11)
6804 /* clear IF, that popfq doesn't enable interrupts early */
6805 andl $~0x200,EFLAGS-R11(%rsp)
6806 movl RIP-R11(%rsp),%edx /* User %eip */
6807@@ -194,6 +232,9 @@ sysexit_from_sys_call:
6808 movl %eax,%esi /* 2nd arg: syscall number */
6809 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6810 call audit_syscall_entry
6811+
6812+ pax_erase_kstack
6813+
6814 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6815 cmpq $(IA32_NR_syscalls-1),%rax
6816 ja ia32_badsys
6817@@ -205,7 +246,7 @@ sysexit_from_sys_call:
6818 .endm
6819
6820 .macro auditsys_exit exit
6821- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6822+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6823 jnz ia32_ret_from_sys_call
6824 TRACE_IRQS_ON
6825 sti
6826@@ -215,12 +256,12 @@ sysexit_from_sys_call:
6827 movzbl %al,%edi /* zero-extend that into %edi */
6828 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6829 call audit_syscall_exit
6830- GET_THREAD_INFO(%r10)
6831+ GET_THREAD_INFO(%r11)
6832 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6833 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6834 cli
6835 TRACE_IRQS_OFF
6836- testl %edi,TI_flags(%r10)
6837+ testl %edi,TI_flags(%r11)
6838 jz \exit
6839 CLEAR_RREGS -ARGOFFSET
6840 jmp int_with_check
6841@@ -238,7 +279,7 @@ sysexit_audit:
6842
6843 sysenter_tracesys:
6844 #ifdef CONFIG_AUDITSYSCALL
6845- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6846+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6847 jz sysenter_auditsys
6848 #endif
6849 SAVE_REST
6850@@ -246,6 +287,9 @@ sysenter_tracesys:
6851 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6852 movq %rsp,%rdi /* &pt_regs -> arg1 */
6853 call syscall_trace_enter
6854+
6855+ pax_erase_kstack
6856+
6857 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6858 RESTORE_REST
6859 cmpq $(IA32_NR_syscalls-1),%rax
6860@@ -277,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
6861 ENTRY(ia32_cstar_target)
6862 CFI_STARTPROC32 simple
6863 CFI_SIGNAL_FRAME
6864- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6865+ CFI_DEF_CFA rsp,0
6866 CFI_REGISTER rip,rcx
6867 /*CFI_REGISTER rflags,r11*/
6868 SWAPGS_UNSAFE_STACK
6869 movl %esp,%r8d
6870 CFI_REGISTER rsp,r8
6871 movq PER_CPU_VAR(kernel_stack),%rsp
6872+ SAVE_ARGS 8*6,0,0
6873+ pax_enter_kernel_user
6874 /*
6875 * No need to follow this irqs on/off section: the syscall
6876 * disabled irqs and here we enable it straight after entry:
6877 */
6878 ENABLE_INTERRUPTS(CLBR_NONE)
6879- SAVE_ARGS 8,0,0
6880 movl %eax,%eax /* zero extension */
6881 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6882 movq %rcx,RIP-ARGOFFSET(%rsp)
6883@@ -305,13 +350,19 @@ ENTRY(ia32_cstar_target)
6884 /* no need to do an access_ok check here because r8 has been
6885 32bit zero extended */
6886 /* hardware stack frame is complete now */
6887+
6888+#ifdef CONFIG_PAX_MEMORY_UDEREF
6889+ mov $PAX_USER_SHADOW_BASE,%r11
6890+ add %r11,%r8
6891+#endif
6892+
6893 1: movl (%r8),%r9d
6894 .section __ex_table,"a"
6895 .quad 1b,ia32_badarg
6896 .previous
6897- GET_THREAD_INFO(%r10)
6898- orl $TS_COMPAT,TI_status(%r10)
6899- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6900+ GET_THREAD_INFO(%r11)
6901+ orl $TS_COMPAT,TI_status(%r11)
6902+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6903 CFI_REMEMBER_STATE
6904 jnz cstar_tracesys
6905 cmpq $IA32_NR_syscalls-1,%rax
6906@@ -321,13 +372,15 @@ cstar_do_call:
6907 cstar_dispatch:
6908 call *ia32_sys_call_table(,%rax,8)
6909 movq %rax,RAX-ARGOFFSET(%rsp)
6910- GET_THREAD_INFO(%r10)
6911+ GET_THREAD_INFO(%r11)
6912 DISABLE_INTERRUPTS(CLBR_NONE)
6913 TRACE_IRQS_OFF
6914- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6915+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6916 jnz sysretl_audit
6917 sysretl_from_sys_call:
6918- andl $~TS_COMPAT,TI_status(%r10)
6919+ pax_exit_kernel_user
6920+ pax_erase_kstack
6921+ andl $~TS_COMPAT,TI_status(%r11)
6922 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6923 movl RIP-ARGOFFSET(%rsp),%ecx
6924 CFI_REGISTER rip,rcx
6925@@ -355,7 +408,7 @@ sysretl_audit:
6926
6927 cstar_tracesys:
6928 #ifdef CONFIG_AUDITSYSCALL
6929- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6930+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6931 jz cstar_auditsys
6932 #endif
6933 xchgl %r9d,%ebp
6934@@ -364,6 +417,9 @@ cstar_tracesys:
6935 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6936 movq %rsp,%rdi /* &pt_regs -> arg1 */
6937 call syscall_trace_enter
6938+
6939+ pax_erase_kstack
6940+
6941 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6942 RESTORE_REST
6943 xchgl %ebp,%r9d
6944@@ -409,20 +465,21 @@ ENTRY(ia32_syscall)
6945 CFI_REL_OFFSET rip,RIP-RIP
6946 PARAVIRT_ADJUST_EXCEPTION_FRAME
6947 SWAPGS
6948- /*
6949- * No need to follow this irqs on/off section: the syscall
6950- * disabled irqs and here we enable it straight after entry:
6951- */
6952- ENABLE_INTERRUPTS(CLBR_NONE)
6953 movl %eax,%eax
6954 pushq_cfi %rax
6955 cld
6956 /* note the registers are not zero extended to the sf.
6957 this could be a problem. */
6958 SAVE_ARGS 0,1,0
6959- GET_THREAD_INFO(%r10)
6960- orl $TS_COMPAT,TI_status(%r10)
6961- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6962+ pax_enter_kernel_user
6963+ /*
6964+ * No need to follow this irqs on/off section: the syscall
6965+ * disabled irqs and here we enable it straight after entry:
6966+ */
6967+ ENABLE_INTERRUPTS(CLBR_NONE)
6968+ GET_THREAD_INFO(%r11)
6969+ orl $TS_COMPAT,TI_status(%r11)
6970+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6971 jnz ia32_tracesys
6972 cmpq $(IA32_NR_syscalls-1),%rax
6973 ja ia32_badsys
6974@@ -441,6 +498,9 @@ ia32_tracesys:
6975 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6976 movq %rsp,%rdi /* &pt_regs -> arg1 */
6977 call syscall_trace_enter
6978+
6979+ pax_erase_kstack
6980+
6981 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6982 RESTORE_REST
6983 cmpq $(IA32_NR_syscalls-1),%rax
6984@@ -455,6 +515,7 @@ ia32_badsys:
6985
6986 quiet_ni_syscall:
6987 movq $-ENOSYS,%rax
6988+ pax_force_retaddr
6989 ret
6990 CFI_ENDPROC
6991
6992diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6993index f6f5c53..b358b28 100644
6994--- a/arch/x86/ia32/sys_ia32.c
6995+++ b/arch/x86/ia32/sys_ia32.c
6996@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6997 */
6998 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6999 {
7000- typeof(ubuf->st_uid) uid = 0;
7001- typeof(ubuf->st_gid) gid = 0;
7002+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
7003+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
7004 SET_UID(uid, stat->uid);
7005 SET_GID(gid, stat->gid);
7006 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
7007@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
7008 }
7009 set_fs(KERNEL_DS);
7010 ret = sys_rt_sigprocmask(how,
7011- set ? (sigset_t __user *)&s : NULL,
7012- oset ? (sigset_t __user *)&s : NULL,
7013+ set ? (sigset_t __force_user *)&s : NULL,
7014+ oset ? (sigset_t __force_user *)&s : NULL,
7015 sigsetsize);
7016 set_fs(old_fs);
7017 if (ret)
7018@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
7019 return alarm_setitimer(seconds);
7020 }
7021
7022-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
7023+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
7024 int options)
7025 {
7026 return compat_sys_wait4(pid, stat_addr, options, NULL);
7027@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
7028 mm_segment_t old_fs = get_fs();
7029
7030 set_fs(KERNEL_DS);
7031- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
7032+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
7033 set_fs(old_fs);
7034 if (put_compat_timespec(&t, interval))
7035 return -EFAULT;
7036@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
7037 mm_segment_t old_fs = get_fs();
7038
7039 set_fs(KERNEL_DS);
7040- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
7041+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
7042 set_fs(old_fs);
7043 if (!ret) {
7044 switch (_NSIG_WORDS) {
7045@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
7046 if (copy_siginfo_from_user32(&info, uinfo))
7047 return -EFAULT;
7048 set_fs(KERNEL_DS);
7049- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7050+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7051 set_fs(old_fs);
7052 return ret;
7053 }
7054@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7055 return -EFAULT;
7056
7057 set_fs(KERNEL_DS);
7058- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7059+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7060 count);
7061 set_fs(old_fs);
7062
7063diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7064index 091508b..e245ff2 100644
7065--- a/arch/x86/include/asm/alternative-asm.h
7066+++ b/arch/x86/include/asm/alternative-asm.h
7067@@ -4,10 +4,10 @@
7068
7069 #ifdef CONFIG_SMP
7070 .macro LOCK_PREFIX
7071-1: lock
7072+672: lock
7073 .section .smp_locks,"a"
7074 .balign 4
7075- .long 1b - .
7076+ .long 672b - .
7077 .previous
7078 .endm
7079 #else
7080@@ -15,6 +15,45 @@
7081 .endm
7082 #endif
7083
7084+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7085+ .macro pax_force_retaddr_bts rip=0
7086+ btsq $63,\rip(%rsp)
7087+ .endm
7088+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7089+ .macro pax_force_retaddr rip=0, reload=0
7090+ btsq $63,\rip(%rsp)
7091+ .endm
7092+ .macro pax_force_fptr ptr
7093+ btsq $63,\ptr
7094+ .endm
7095+ .macro pax_set_fptr_mask
7096+ .endm
7097+#endif
7098+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7099+ .macro pax_force_retaddr rip=0, reload=0
7100+ .if \reload
7101+ pax_set_fptr_mask
7102+ .endif
7103+ orq %r10,\rip(%rsp)
7104+ .endm
7105+ .macro pax_force_fptr ptr
7106+ orq %r10,\ptr
7107+ .endm
7108+ .macro pax_set_fptr_mask
7109+ movabs $0x8000000000000000,%r10
7110+ .endm
7111+#endif
7112+#else
7113+ .macro pax_force_retaddr rip=0, reload=0
7114+ .endm
7115+ .macro pax_force_fptr ptr
7116+ .endm
7117+ .macro pax_force_retaddr_bts rip=0
7118+ .endm
7119+ .macro pax_set_fptr_mask
7120+ .endm
7121+#endif
7122+
7123 .macro altinstruction_entry orig alt feature orig_len alt_len
7124 .long \orig - .
7125 .long \alt - .
7126diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7127index 37ad100..7d47faa 100644
7128--- a/arch/x86/include/asm/alternative.h
7129+++ b/arch/x86/include/asm/alternative.h
7130@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7131 ".section .discard,\"aw\",@progbits\n" \
7132 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7133 ".previous\n" \
7134- ".section .altinstr_replacement, \"ax\"\n" \
7135+ ".section .altinstr_replacement, \"a\"\n" \
7136 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7137 ".previous"
7138
7139diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7140index 1a6c09a..fec2432 100644
7141--- a/arch/x86/include/asm/apic.h
7142+++ b/arch/x86/include/asm/apic.h
7143@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7144
7145 #ifdef CONFIG_X86_LOCAL_APIC
7146
7147-extern unsigned int apic_verbosity;
7148+extern int apic_verbosity;
7149 extern int local_apic_timer_c2_ok;
7150
7151 extern int disable_apic;
7152diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7153index 20370c6..a2eb9b0 100644
7154--- a/arch/x86/include/asm/apm.h
7155+++ b/arch/x86/include/asm/apm.h
7156@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7157 __asm__ __volatile__(APM_DO_ZERO_SEGS
7158 "pushl %%edi\n\t"
7159 "pushl %%ebp\n\t"
7160- "lcall *%%cs:apm_bios_entry\n\t"
7161+ "lcall *%%ss:apm_bios_entry\n\t"
7162 "setc %%al\n\t"
7163 "popl %%ebp\n\t"
7164 "popl %%edi\n\t"
7165@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7166 __asm__ __volatile__(APM_DO_ZERO_SEGS
7167 "pushl %%edi\n\t"
7168 "pushl %%ebp\n\t"
7169- "lcall *%%cs:apm_bios_entry\n\t"
7170+ "lcall *%%ss:apm_bios_entry\n\t"
7171 "setc %%bl\n\t"
7172 "popl %%ebp\n\t"
7173 "popl %%edi\n\t"
7174diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7175index 58cb6d4..ca9010d 100644
7176--- a/arch/x86/include/asm/atomic.h
7177+++ b/arch/x86/include/asm/atomic.h
7178@@ -22,7 +22,18 @@
7179 */
7180 static inline int atomic_read(const atomic_t *v)
7181 {
7182- return (*(volatile int *)&(v)->counter);
7183+ return (*(volatile const int *)&(v)->counter);
7184+}
7185+
7186+/**
7187+ * atomic_read_unchecked - read atomic variable
7188+ * @v: pointer of type atomic_unchecked_t
7189+ *
7190+ * Atomically reads the value of @v.
7191+ */
7192+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7193+{
7194+ return (*(volatile const int *)&(v)->counter);
7195 }
7196
7197 /**
7198@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7199 }
7200
7201 /**
7202+ * atomic_set_unchecked - set atomic variable
7203+ * @v: pointer of type atomic_unchecked_t
7204+ * @i: required value
7205+ *
7206+ * Atomically sets the value of @v to @i.
7207+ */
7208+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7209+{
7210+ v->counter = i;
7211+}
7212+
7213+/**
7214 * atomic_add - add integer to atomic variable
7215 * @i: integer value to add
7216 * @v: pointer of type atomic_t
7217@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7218 */
7219 static inline void atomic_add(int i, atomic_t *v)
7220 {
7221- asm volatile(LOCK_PREFIX "addl %1,%0"
7222+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7223+
7224+#ifdef CONFIG_PAX_REFCOUNT
7225+ "jno 0f\n"
7226+ LOCK_PREFIX "subl %1,%0\n"
7227+ "int $4\n0:\n"
7228+ _ASM_EXTABLE(0b, 0b)
7229+#endif
7230+
7231+ : "+m" (v->counter)
7232+ : "ir" (i));
7233+}
7234+
7235+/**
7236+ * atomic_add_unchecked - add integer to atomic variable
7237+ * @i: integer value to add
7238+ * @v: pointer of type atomic_unchecked_t
7239+ *
7240+ * Atomically adds @i to @v.
7241+ */
7242+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7243+{
7244+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7245 : "+m" (v->counter)
7246 : "ir" (i));
7247 }
7248@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7249 */
7250 static inline void atomic_sub(int i, atomic_t *v)
7251 {
7252- asm volatile(LOCK_PREFIX "subl %1,%0"
7253+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7254+
7255+#ifdef CONFIG_PAX_REFCOUNT
7256+ "jno 0f\n"
7257+ LOCK_PREFIX "addl %1,%0\n"
7258+ "int $4\n0:\n"
7259+ _ASM_EXTABLE(0b, 0b)
7260+#endif
7261+
7262+ : "+m" (v->counter)
7263+ : "ir" (i));
7264+}
7265+
7266+/**
7267+ * atomic_sub_unchecked - subtract integer from atomic variable
7268+ * @i: integer value to subtract
7269+ * @v: pointer of type atomic_unchecked_t
7270+ *
7271+ * Atomically subtracts @i from @v.
7272+ */
7273+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7274+{
7275+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7276 : "+m" (v->counter)
7277 : "ir" (i));
7278 }
7279@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7280 {
7281 unsigned char c;
7282
7283- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7284+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7285+
7286+#ifdef CONFIG_PAX_REFCOUNT
7287+ "jno 0f\n"
7288+ LOCK_PREFIX "addl %2,%0\n"
7289+ "int $4\n0:\n"
7290+ _ASM_EXTABLE(0b, 0b)
7291+#endif
7292+
7293+ "sete %1\n"
7294 : "+m" (v->counter), "=qm" (c)
7295 : "ir" (i) : "memory");
7296 return c;
7297@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7298 */
7299 static inline void atomic_inc(atomic_t *v)
7300 {
7301- asm volatile(LOCK_PREFIX "incl %0"
7302+ asm volatile(LOCK_PREFIX "incl %0\n"
7303+
7304+#ifdef CONFIG_PAX_REFCOUNT
7305+ "jno 0f\n"
7306+ LOCK_PREFIX "decl %0\n"
7307+ "int $4\n0:\n"
7308+ _ASM_EXTABLE(0b, 0b)
7309+#endif
7310+
7311+ : "+m" (v->counter));
7312+}
7313+
7314+/**
7315+ * atomic_inc_unchecked - increment atomic variable
7316+ * @v: pointer of type atomic_unchecked_t
7317+ *
7318+ * Atomically increments @v by 1.
7319+ */
7320+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7321+{
7322+ asm volatile(LOCK_PREFIX "incl %0\n"
7323 : "+m" (v->counter));
7324 }
7325
7326@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7327 */
7328 static inline void atomic_dec(atomic_t *v)
7329 {
7330- asm volatile(LOCK_PREFIX "decl %0"
7331+ asm volatile(LOCK_PREFIX "decl %0\n"
7332+
7333+#ifdef CONFIG_PAX_REFCOUNT
7334+ "jno 0f\n"
7335+ LOCK_PREFIX "incl %0\n"
7336+ "int $4\n0:\n"
7337+ _ASM_EXTABLE(0b, 0b)
7338+#endif
7339+
7340+ : "+m" (v->counter));
7341+}
7342+
7343+/**
7344+ * atomic_dec_unchecked - decrement atomic variable
7345+ * @v: pointer of type atomic_unchecked_t
7346+ *
7347+ * Atomically decrements @v by 1.
7348+ */
7349+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7350+{
7351+ asm volatile(LOCK_PREFIX "decl %0\n"
7352 : "+m" (v->counter));
7353 }
7354
7355@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7356 {
7357 unsigned char c;
7358
7359- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7360+ asm volatile(LOCK_PREFIX "decl %0\n"
7361+
7362+#ifdef CONFIG_PAX_REFCOUNT
7363+ "jno 0f\n"
7364+ LOCK_PREFIX "incl %0\n"
7365+ "int $4\n0:\n"
7366+ _ASM_EXTABLE(0b, 0b)
7367+#endif
7368+
7369+ "sete %1\n"
7370 : "+m" (v->counter), "=qm" (c)
7371 : : "memory");
7372 return c != 0;
7373@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7374 {
7375 unsigned char c;
7376
7377- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7378+ asm volatile(LOCK_PREFIX "incl %0\n"
7379+
7380+#ifdef CONFIG_PAX_REFCOUNT
7381+ "jno 0f\n"
7382+ LOCK_PREFIX "decl %0\n"
7383+ "int $4\n0:\n"
7384+ _ASM_EXTABLE(0b, 0b)
7385+#endif
7386+
7387+ "sete %1\n"
7388+ : "+m" (v->counter), "=qm" (c)
7389+ : : "memory");
7390+ return c != 0;
7391+}
7392+
7393+/**
7394+ * atomic_inc_and_test_unchecked - increment and test
7395+ * @v: pointer of type atomic_unchecked_t
7396+ *
7397+ * Atomically increments @v by 1
7398+ * and returns true if the result is zero, or false for all
7399+ * other cases.
7400+ */
7401+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7402+{
7403+ unsigned char c;
7404+
7405+ asm volatile(LOCK_PREFIX "incl %0\n"
7406+ "sete %1\n"
7407 : "+m" (v->counter), "=qm" (c)
7408 : : "memory");
7409 return c != 0;
7410@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7411 {
7412 unsigned char c;
7413
7414- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7415+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7416+
7417+#ifdef CONFIG_PAX_REFCOUNT
7418+ "jno 0f\n"
7419+ LOCK_PREFIX "subl %2,%0\n"
7420+ "int $4\n0:\n"
7421+ _ASM_EXTABLE(0b, 0b)
7422+#endif
7423+
7424+ "sets %1\n"
7425 : "+m" (v->counter), "=qm" (c)
7426 : "ir" (i) : "memory");
7427 return c;
7428@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
7429 goto no_xadd;
7430 #endif
7431 /* Modern 486+ processor */
7432- return i + xadd(&v->counter, i);
7433+ return i + xadd_check_overflow(&v->counter, i);
7434
7435 #ifdef CONFIG_M386
7436 no_xadd: /* Legacy 386 processor */
7437@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
7438 }
7439
7440 /**
7441+ * atomic_add_return_unchecked - add integer and return
7442+ * @i: integer value to add
7443+ * @v: pointer of type atomic_unchecked_t
7444+ *
7445+ * Atomically adds @i to @v and returns @i + @v
7446+ */
7447+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7448+{
7449+#ifdef CONFIG_M386
7450+ int __i;
7451+ unsigned long flags;
7452+ if (unlikely(boot_cpu_data.x86 <= 3))
7453+ goto no_xadd;
7454+#endif
7455+ /* Modern 486+ processor */
7456+ return i + xadd(&v->counter, i);
7457+
7458+#ifdef CONFIG_M386
7459+no_xadd: /* Legacy 386 processor */
7460+ raw_local_irq_save(flags);
7461+ __i = atomic_read_unchecked(v);
7462+ atomic_set_unchecked(v, i + __i);
7463+ raw_local_irq_restore(flags);
7464+ return i + __i;
7465+#endif
7466+}
7467+
7468+/**
7469 * atomic_sub_return - subtract integer and return
7470 * @v: pointer of type atomic_t
7471 * @i: integer value to subtract
7472@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7473 }
7474
7475 #define atomic_inc_return(v) (atomic_add_return(1, v))
7476+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7477+{
7478+ return atomic_add_return_unchecked(1, v);
7479+}
7480 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7481
7482 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7483@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7484 return cmpxchg(&v->counter, old, new);
7485 }
7486
7487+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7488+{
7489+ return cmpxchg(&v->counter, old, new);
7490+}
7491+
7492 static inline int atomic_xchg(atomic_t *v, int new)
7493 {
7494 return xchg(&v->counter, new);
7495 }
7496
7497+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7498+{
7499+ return xchg(&v->counter, new);
7500+}
7501+
7502 /**
7503 * __atomic_add_unless - add unless the number is already a given value
7504 * @v: pointer of type atomic_t
7505@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7506 */
7507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7508 {
7509- int c, old;
7510+ int c, old, new;
7511 c = atomic_read(v);
7512 for (;;) {
7513- if (unlikely(c == (u)))
7514+ if (unlikely(c == u))
7515 break;
7516- old = atomic_cmpxchg((v), c, c + (a));
7517+
7518+ asm volatile("addl %2,%0\n"
7519+
7520+#ifdef CONFIG_PAX_REFCOUNT
7521+ "jno 0f\n"
7522+ "subl %2,%0\n"
7523+ "int $4\n0:\n"
7524+ _ASM_EXTABLE(0b, 0b)
7525+#endif
7526+
7527+ : "=r" (new)
7528+ : "0" (c), "ir" (a));
7529+
7530+ old = atomic_cmpxchg(v, c, new);
7531 if (likely(old == c))
7532 break;
7533 c = old;
7534@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7535 return c;
7536 }
7537
7538+/**
7539+ * atomic_inc_not_zero_hint - increment if not null
7540+ * @v: pointer of type atomic_t
7541+ * @hint: probable value of the atomic before the increment
7542+ *
7543+ * This version of atomic_inc_not_zero() gives a hint of probable
7544+ * value of the atomic. This helps processor to not read the memory
7545+ * before doing the atomic read/modify/write cycle, lowering
7546+ * number of bus transactions on some arches.
7547+ *
7548+ * Returns: 0 if increment was not done, 1 otherwise.
7549+ */
7550+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7551+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7552+{
7553+ int val, c = hint, new;
7554+
7555+ /* sanity test, should be removed by compiler if hint is a constant */
7556+ if (!hint)
7557+ return __atomic_add_unless(v, 1, 0);
7558+
7559+ do {
7560+ asm volatile("incl %0\n"
7561+
7562+#ifdef CONFIG_PAX_REFCOUNT
7563+ "jno 0f\n"
7564+ "decl %0\n"
7565+ "int $4\n0:\n"
7566+ _ASM_EXTABLE(0b, 0b)
7567+#endif
7568+
7569+ : "=r" (new)
7570+ : "0" (c));
7571+
7572+ val = atomic_cmpxchg(v, c, new);
7573+ if (val == c)
7574+ return 1;
7575+ c = val;
7576+ } while (c);
7577+
7578+ return 0;
7579+}
7580
7581 /*
7582 * atomic_dec_if_positive - decrement by 1 if old value positive
7583diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7584index 24098aa..1e37723 100644
7585--- a/arch/x86/include/asm/atomic64_32.h
7586+++ b/arch/x86/include/asm/atomic64_32.h
7587@@ -12,6 +12,14 @@ typedef struct {
7588 u64 __aligned(8) counter;
7589 } atomic64_t;
7590
7591+#ifdef CONFIG_PAX_REFCOUNT
7592+typedef struct {
7593+ u64 __aligned(8) counter;
7594+} atomic64_unchecked_t;
7595+#else
7596+typedef atomic64_t atomic64_unchecked_t;
7597+#endif
7598+
7599 #define ATOMIC64_INIT(val) { (val) }
7600
7601 #ifdef CONFIG_X86_CMPXCHG64
7602@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7603 }
7604
7605 /**
7606+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7607+ * @p: pointer to type atomic64_unchecked_t
7608+ * @o: expected value
7609+ * @n: new value
7610+ *
7611+ * Atomically sets @v to @n if it was equal to @o and returns
7612+ * the old value.
7613+ */
7614+
7615+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7616+{
7617+ return cmpxchg64(&v->counter, o, n);
7618+}
7619+
7620+/**
7621 * atomic64_xchg - xchg atomic64 variable
7622 * @v: pointer to type atomic64_t
7623 * @n: value to assign
7624@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7625 }
7626
7627 /**
7628+ * atomic64_set_unchecked - set atomic64 variable
7629+ * @v: pointer to type atomic64_unchecked_t
7630+ * @n: value to assign
7631+ *
7632+ * Atomically sets the value of @v to @n.
7633+ */
7634+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7635+{
7636+ unsigned high = (unsigned)(i >> 32);
7637+ unsigned low = (unsigned)i;
7638+ asm volatile(ATOMIC64_ALTERNATIVE(set)
7639+ : "+b" (low), "+c" (high)
7640+ : "S" (v)
7641+ : "eax", "edx", "memory"
7642+ );
7643+}
7644+
7645+/**
7646 * atomic64_read - read atomic64 variable
7647 * @v: pointer to type atomic64_t
7648 *
7649@@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7650 }
7651
7652 /**
7653+ * atomic64_read_unchecked - read atomic64 variable
7654+ * @v: pointer to type atomic64_unchecked_t
7655+ *
7656+ * Atomically reads the value of @v and returns it.
7657+ */
7658+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7659+{
7660+ long long r;
7661+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7662+ : "=A" (r), "+c" (v)
7663+ : : "memory"
7664+ );
7665+ return r;
7666+ }
7667+
7668+/**
7669 * atomic64_add_return - add and return
7670 * @i: integer value to add
7671 * @v: pointer to type atomic64_t
7672@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7673 return i;
7674 }
7675
7676+/**
7677+ * atomic64_add_return_unchecked - add and return
7678+ * @i: integer value to add
7679+ * @v: pointer to type atomic64_unchecked_t
7680+ *
7681+ * Atomically adds @i to @v and returns @i + *@v
7682+ */
7683+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7684+{
7685+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7686+ : "+A" (i), "+c" (v)
7687+ : : "memory"
7688+ );
7689+ return i;
7690+}
7691+
7692 /*
7693 * Other variants with different arithmetic operators:
7694 */
7695@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7696 return a;
7697 }
7698
7699+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7700+{
7701+ long long a;
7702+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7703+ : "=A" (a)
7704+ : "S" (v)
7705+ : "memory", "ecx"
7706+ );
7707+ return a;
7708+}
7709+
7710 static inline long long atomic64_dec_return(atomic64_t *v)
7711 {
7712 long long a;
7713@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7714 }
7715
7716 /**
7717+ * atomic64_add_unchecked - add integer to atomic64 variable
7718+ * @i: integer value to add
7719+ * @v: pointer to type atomic64_unchecked_t
7720+ *
7721+ * Atomically adds @i to @v.
7722+ */
7723+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7724+{
7725+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7726+ : "+A" (i), "+c" (v)
7727+ : : "memory"
7728+ );
7729+ return i;
7730+}
7731+
7732+/**
7733 * atomic64_sub - subtract the atomic64 variable
7734 * @i: integer value to subtract
7735 * @v: pointer to type atomic64_t
7736diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7737index 0e1cbfc..5623683 100644
7738--- a/arch/x86/include/asm/atomic64_64.h
7739+++ b/arch/x86/include/asm/atomic64_64.h
7740@@ -18,7 +18,19 @@
7741 */
7742 static inline long atomic64_read(const atomic64_t *v)
7743 {
7744- return (*(volatile long *)&(v)->counter);
7745+ return (*(volatile const long *)&(v)->counter);
7746+}
7747+
7748+/**
7749+ * atomic64_read_unchecked - read atomic64 variable
7750+ * @v: pointer of type atomic64_unchecked_t
7751+ *
7752+ * Atomically reads the value of @v.
7753+ * Doesn't imply a read memory barrier.
7754+ */
7755+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7756+{
7757+ return (*(volatile const long *)&(v)->counter);
7758 }
7759
7760 /**
7761@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7762 }
7763
7764 /**
7765+ * atomic64_set_unchecked - set atomic64 variable
7766+ * @v: pointer to type atomic64_unchecked_t
7767+ * @i: required value
7768+ *
7769+ * Atomically sets the value of @v to @i.
7770+ */
7771+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7772+{
7773+ v->counter = i;
7774+}
7775+
7776+/**
7777 * atomic64_add - add integer to atomic64 variable
7778 * @i: integer value to add
7779 * @v: pointer to type atomic64_t
7780@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7781 */
7782 static inline void atomic64_add(long i, atomic64_t *v)
7783 {
7784+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7785+
7786+#ifdef CONFIG_PAX_REFCOUNT
7787+ "jno 0f\n"
7788+ LOCK_PREFIX "subq %1,%0\n"
7789+ "int $4\n0:\n"
7790+ _ASM_EXTABLE(0b, 0b)
7791+#endif
7792+
7793+ : "=m" (v->counter)
7794+ : "er" (i), "m" (v->counter));
7795+}
7796+
7797+/**
7798+ * atomic64_add_unchecked - add integer to atomic64 variable
7799+ * @i: integer value to add
7800+ * @v: pointer to type atomic64_unchecked_t
7801+ *
7802+ * Atomically adds @i to @v.
7803+ */
7804+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7805+{
7806 asm volatile(LOCK_PREFIX "addq %1,%0"
7807 : "=m" (v->counter)
7808 : "er" (i), "m" (v->counter));
7809@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7810 */
7811 static inline void atomic64_sub(long i, atomic64_t *v)
7812 {
7813- asm volatile(LOCK_PREFIX "subq %1,%0"
7814+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7815+
7816+#ifdef CONFIG_PAX_REFCOUNT
7817+ "jno 0f\n"
7818+ LOCK_PREFIX "addq %1,%0\n"
7819+ "int $4\n0:\n"
7820+ _ASM_EXTABLE(0b, 0b)
7821+#endif
7822+
7823+ : "=m" (v->counter)
7824+ : "er" (i), "m" (v->counter));
7825+}
7826+
7827+/**
7828+ * atomic64_sub_unchecked - subtract the atomic64 variable
7829+ * @i: integer value to subtract
7830+ * @v: pointer to type atomic64_unchecked_t
7831+ *
7832+ * Atomically subtracts @i from @v.
7833+ */
7834+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7835+{
7836+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7837 : "=m" (v->counter)
7838 : "er" (i), "m" (v->counter));
7839 }
7840@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7841 {
7842 unsigned char c;
7843
7844- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7845+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7846+
7847+#ifdef CONFIG_PAX_REFCOUNT
7848+ "jno 0f\n"
7849+ LOCK_PREFIX "addq %2,%0\n"
7850+ "int $4\n0:\n"
7851+ _ASM_EXTABLE(0b, 0b)
7852+#endif
7853+
7854+ "sete %1\n"
7855 : "=m" (v->counter), "=qm" (c)
7856 : "er" (i), "m" (v->counter) : "memory");
7857 return c;
7858@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7859 */
7860 static inline void atomic64_inc(atomic64_t *v)
7861 {
7862+ asm volatile(LOCK_PREFIX "incq %0\n"
7863+
7864+#ifdef CONFIG_PAX_REFCOUNT
7865+ "jno 0f\n"
7866+ LOCK_PREFIX "decq %0\n"
7867+ "int $4\n0:\n"
7868+ _ASM_EXTABLE(0b, 0b)
7869+#endif
7870+
7871+ : "=m" (v->counter)
7872+ : "m" (v->counter));
7873+}
7874+
7875+/**
7876+ * atomic64_inc_unchecked - increment atomic64 variable
7877+ * @v: pointer to type atomic64_unchecked_t
7878+ *
7879+ * Atomically increments @v by 1.
7880+ */
7881+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7882+{
7883 asm volatile(LOCK_PREFIX "incq %0"
7884 : "=m" (v->counter)
7885 : "m" (v->counter));
7886@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7887 */
7888 static inline void atomic64_dec(atomic64_t *v)
7889 {
7890- asm volatile(LOCK_PREFIX "decq %0"
7891+ asm volatile(LOCK_PREFIX "decq %0\n"
7892+
7893+#ifdef CONFIG_PAX_REFCOUNT
7894+ "jno 0f\n"
7895+ LOCK_PREFIX "incq %0\n"
7896+ "int $4\n0:\n"
7897+ _ASM_EXTABLE(0b, 0b)
7898+#endif
7899+
7900+ : "=m" (v->counter)
7901+ : "m" (v->counter));
7902+}
7903+
7904+/**
7905+ * atomic64_dec_unchecked - decrement atomic64 variable
7906+ * @v: pointer to type atomic64_t
7907+ *
7908+ * Atomically decrements @v by 1.
7909+ */
7910+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7911+{
7912+ asm volatile(LOCK_PREFIX "decq %0\n"
7913 : "=m" (v->counter)
7914 : "m" (v->counter));
7915 }
7916@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7917 {
7918 unsigned char c;
7919
7920- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7921+ asm volatile(LOCK_PREFIX "decq %0\n"
7922+
7923+#ifdef CONFIG_PAX_REFCOUNT
7924+ "jno 0f\n"
7925+ LOCK_PREFIX "incq %0\n"
7926+ "int $4\n0:\n"
7927+ _ASM_EXTABLE(0b, 0b)
7928+#endif
7929+
7930+ "sete %1\n"
7931 : "=m" (v->counter), "=qm" (c)
7932 : "m" (v->counter) : "memory");
7933 return c != 0;
7934@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7935 {
7936 unsigned char c;
7937
7938- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7939+ asm volatile(LOCK_PREFIX "incq %0\n"
7940+
7941+#ifdef CONFIG_PAX_REFCOUNT
7942+ "jno 0f\n"
7943+ LOCK_PREFIX "decq %0\n"
7944+ "int $4\n0:\n"
7945+ _ASM_EXTABLE(0b, 0b)
7946+#endif
7947+
7948+ "sete %1\n"
7949 : "=m" (v->counter), "=qm" (c)
7950 : "m" (v->counter) : "memory");
7951 return c != 0;
7952@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7953 {
7954 unsigned char c;
7955
7956- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7957+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7958+
7959+#ifdef CONFIG_PAX_REFCOUNT
7960+ "jno 0f\n"
7961+ LOCK_PREFIX "subq %2,%0\n"
7962+ "int $4\n0:\n"
7963+ _ASM_EXTABLE(0b, 0b)
7964+#endif
7965+
7966+ "sets %1\n"
7967 : "=m" (v->counter), "=qm" (c)
7968 : "er" (i), "m" (v->counter) : "memory");
7969 return c;
7970@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7971 */
7972 static inline long atomic64_add_return(long i, atomic64_t *v)
7973 {
7974+ return i + xadd_check_overflow(&v->counter, i);
7975+}
7976+
7977+/**
7978+ * atomic64_add_return_unchecked - add and return
7979+ * @i: integer value to add
7980+ * @v: pointer to type atomic64_unchecked_t
7981+ *
7982+ * Atomically adds @i to @v and returns @i + @v
7983+ */
7984+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7985+{
7986 return i + xadd(&v->counter, i);
7987 }
7988
7989@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7990 }
7991
7992 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7993+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7994+{
7995+ return atomic64_add_return_unchecked(1, v);
7996+}
7997 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7998
7999 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8000@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8001 return cmpxchg(&v->counter, old, new);
8002 }
8003
8004+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8005+{
8006+ return cmpxchg(&v->counter, old, new);
8007+}
8008+
8009 static inline long atomic64_xchg(atomic64_t *v, long new)
8010 {
8011 return xchg(&v->counter, new);
8012@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
8013 */
8014 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8015 {
8016- long c, old;
8017+ long c, old, new;
8018 c = atomic64_read(v);
8019 for (;;) {
8020- if (unlikely(c == (u)))
8021+ if (unlikely(c == u))
8022 break;
8023- old = atomic64_cmpxchg((v), c, c + (a));
8024+
8025+ asm volatile("add %2,%0\n"
8026+
8027+#ifdef CONFIG_PAX_REFCOUNT
8028+ "jno 0f\n"
8029+ "sub %2,%0\n"
8030+ "int $4\n0:\n"
8031+ _ASM_EXTABLE(0b, 0b)
8032+#endif
8033+
8034+ : "=r" (new)
8035+ : "0" (c), "ir" (a));
8036+
8037+ old = atomic64_cmpxchg(v, c, new);
8038 if (likely(old == c))
8039 break;
8040 c = old;
8041 }
8042- return c != (u);
8043+ return c != u;
8044 }
8045
8046 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8047diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8048index 1775d6e..b65017f 100644
8049--- a/arch/x86/include/asm/bitops.h
8050+++ b/arch/x86/include/asm/bitops.h
8051@@ -38,7 +38,7 @@
8052 * a mask operation on a byte.
8053 */
8054 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8055-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8056+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8057 #define CONST_MASK(nr) (1 << ((nr) & 7))
8058
8059 /**
8060diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8061index 5e1a2ee..c9f9533 100644
8062--- a/arch/x86/include/asm/boot.h
8063+++ b/arch/x86/include/asm/boot.h
8064@@ -11,10 +11,15 @@
8065 #include <asm/pgtable_types.h>
8066
8067 /* Physical address where kernel should be loaded. */
8068-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8069+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8070 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8071 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8072
8073+#ifndef __ASSEMBLY__
8074+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8075+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8076+#endif
8077+
8078 /* Minimum kernel alignment, as a power of two */
8079 #ifdef CONFIG_X86_64
8080 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8081diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8082index 48f99f1..d78ebf9 100644
8083--- a/arch/x86/include/asm/cache.h
8084+++ b/arch/x86/include/asm/cache.h
8085@@ -5,12 +5,13 @@
8086
8087 /* L1 cache line size */
8088 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8089-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8090+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8091
8092 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8093+#define __read_only __attribute__((__section__(".data..read_only")))
8094
8095 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8096-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8097+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8098
8099 #ifdef CONFIG_X86_VSMP
8100 #ifdef CONFIG_SMP
8101diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8102index 4e12668..501d239 100644
8103--- a/arch/x86/include/asm/cacheflush.h
8104+++ b/arch/x86/include/asm/cacheflush.h
8105@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8106 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8107
8108 if (pg_flags == _PGMT_DEFAULT)
8109- return -1;
8110+ return ~0UL;
8111 else if (pg_flags == _PGMT_WC)
8112 return _PAGE_CACHE_WC;
8113 else if (pg_flags == _PGMT_UC_MINUS)
8114diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8115index 46fc474..b02b0f9 100644
8116--- a/arch/x86/include/asm/checksum_32.h
8117+++ b/arch/x86/include/asm/checksum_32.h
8118@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8119 int len, __wsum sum,
8120 int *src_err_ptr, int *dst_err_ptr);
8121
8122+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8123+ int len, __wsum sum,
8124+ int *src_err_ptr, int *dst_err_ptr);
8125+
8126+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8127+ int len, __wsum sum,
8128+ int *src_err_ptr, int *dst_err_ptr);
8129+
8130 /*
8131 * Note: when you get a NULL pointer exception here this means someone
8132 * passed in an incorrect kernel address to one of these functions.
8133@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8134 int *err_ptr)
8135 {
8136 might_sleep();
8137- return csum_partial_copy_generic((__force void *)src, dst,
8138+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8139 len, sum, err_ptr, NULL);
8140 }
8141
8142@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8143 {
8144 might_sleep();
8145 if (access_ok(VERIFY_WRITE, dst, len))
8146- return csum_partial_copy_generic(src, (__force void *)dst,
8147+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8148 len, sum, NULL, err_ptr);
8149
8150 if (len)
8151diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
8152index 5d3acdf..6447a02 100644
8153--- a/arch/x86/include/asm/cmpxchg.h
8154+++ b/arch/x86/include/asm/cmpxchg.h
8155@@ -14,6 +14,8 @@ extern void __cmpxchg_wrong_size(void)
8156 __compiletime_error("Bad argument size for cmpxchg");
8157 extern void __xadd_wrong_size(void)
8158 __compiletime_error("Bad argument size for xadd");
8159+extern void __xadd_check_overflow_wrong_size(void)
8160+ __compiletime_error("Bad argument size for xadd_check_overflow");
8161
8162 /*
8163 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
8164@@ -195,6 +197,34 @@ extern void __xadd_wrong_size(void)
8165 __ret; \
8166 })
8167
8168+#define __xadd_check_overflow(ptr, inc, lock) \
8169+ ({ \
8170+ __typeof__ (*(ptr)) __ret = (inc); \
8171+ switch (sizeof(*(ptr))) { \
8172+ case __X86_CASE_L: \
8173+ asm volatile (lock "xaddl %0, %1\n" \
8174+ "jno 0f\n" \
8175+ "mov %0,%1\n" \
8176+ "int $4\n0:\n" \
8177+ _ASM_EXTABLE(0b, 0b) \
8178+ : "+r" (__ret), "+m" (*(ptr)) \
8179+ : : "memory", "cc"); \
8180+ break; \
8181+ case __X86_CASE_Q: \
8182+ asm volatile (lock "xaddq %q0, %1\n" \
8183+ "jno 0f\n" \
8184+ "mov %0,%1\n" \
8185+ "int $4\n0:\n" \
8186+ _ASM_EXTABLE(0b, 0b) \
8187+ : "+r" (__ret), "+m" (*(ptr)) \
8188+ : : "memory", "cc"); \
8189+ break; \
8190+ default: \
8191+ __xadd_check_overflow_wrong_size(); \
8192+ } \
8193+ __ret; \
8194+ })
8195+
8196 /*
8197 * xadd() adds "inc" to "*ptr" and atomically returns the previous
8198 * value of "*ptr".
8199@@ -207,4 +237,6 @@ extern void __xadd_wrong_size(void)
8200 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
8201 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
8202
8203+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
8204+
8205 #endif /* ASM_X86_CMPXCHG_H */
8206diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8207index f3444f7..051a196 100644
8208--- a/arch/x86/include/asm/cpufeature.h
8209+++ b/arch/x86/include/asm/cpufeature.h
8210@@ -363,7 +363,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8211 ".section .discard,\"aw\",@progbits\n"
8212 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8213 ".previous\n"
8214- ".section .altinstr_replacement,\"ax\"\n"
8215+ ".section .altinstr_replacement,\"a\"\n"
8216 "3: movb $1,%0\n"
8217 "4:\n"
8218 ".previous\n"
8219diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8220index 41935fa..3b40db8 100644
8221--- a/arch/x86/include/asm/desc.h
8222+++ b/arch/x86/include/asm/desc.h
8223@@ -4,6 +4,7 @@
8224 #include <asm/desc_defs.h>
8225 #include <asm/ldt.h>
8226 #include <asm/mmu.h>
8227+#include <asm/pgtable.h>
8228
8229 #include <linux/smp.h>
8230
8231@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8232
8233 desc->type = (info->read_exec_only ^ 1) << 1;
8234 desc->type |= info->contents << 2;
8235+ desc->type |= info->seg_not_present ^ 1;
8236
8237 desc->s = 1;
8238 desc->dpl = 0x3;
8239@@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8240 }
8241
8242 extern struct desc_ptr idt_descr;
8243-extern gate_desc idt_table[];
8244-
8245-struct gdt_page {
8246- struct desc_struct gdt[GDT_ENTRIES];
8247-} __attribute__((aligned(PAGE_SIZE)));
8248-
8249-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8250+extern gate_desc idt_table[256];
8251
8252+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8253 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8254 {
8255- return per_cpu(gdt_page, cpu).gdt;
8256+ return cpu_gdt_table[cpu];
8257 }
8258
8259 #ifdef CONFIG_X86_64
8260@@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8261 unsigned long base, unsigned dpl, unsigned flags,
8262 unsigned short seg)
8263 {
8264- gate->a = (seg << 16) | (base & 0xffff);
8265- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8266+ gate->gate.offset_low = base;
8267+ gate->gate.seg = seg;
8268+ gate->gate.reserved = 0;
8269+ gate->gate.type = type;
8270+ gate->gate.s = 0;
8271+ gate->gate.dpl = dpl;
8272+ gate->gate.p = 1;
8273+ gate->gate.offset_high = base >> 16;
8274 }
8275
8276 #endif
8277@@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8278
8279 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8280 {
8281+ pax_open_kernel();
8282 memcpy(&idt[entry], gate, sizeof(*gate));
8283+ pax_close_kernel();
8284 }
8285
8286 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8287 {
8288+ pax_open_kernel();
8289 memcpy(&ldt[entry], desc, 8);
8290+ pax_close_kernel();
8291 }
8292
8293 static inline void
8294@@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8295 default: size = sizeof(*gdt); break;
8296 }
8297
8298+ pax_open_kernel();
8299 memcpy(&gdt[entry], desc, size);
8300+ pax_close_kernel();
8301 }
8302
8303 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8304@@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8305
8306 static inline void native_load_tr_desc(void)
8307 {
8308+ pax_open_kernel();
8309 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8310+ pax_close_kernel();
8311 }
8312
8313 static inline void native_load_gdt(const struct desc_ptr *dtr)
8314@@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8315 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8316 unsigned int i;
8317
8318+ pax_open_kernel();
8319 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8320 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8321+ pax_close_kernel();
8322 }
8323
8324 #define _LDT_empty(info) \
8325@@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8326 desc->limit = (limit >> 16) & 0xf;
8327 }
8328
8329-static inline void _set_gate(int gate, unsigned type, void *addr,
8330+static inline void _set_gate(int gate, unsigned type, const void *addr,
8331 unsigned dpl, unsigned ist, unsigned seg)
8332 {
8333 gate_desc s;
8334@@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8335 * Pentium F0 0F bugfix can have resulted in the mapped
8336 * IDT being write-protected.
8337 */
8338-static inline void set_intr_gate(unsigned int n, void *addr)
8339+static inline void set_intr_gate(unsigned int n, const void *addr)
8340 {
8341 BUG_ON((unsigned)n > 0xFF);
8342 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8343@@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8344 /*
8345 * This routine sets up an interrupt gate at directory privilege level 3.
8346 */
8347-static inline void set_system_intr_gate(unsigned int n, void *addr)
8348+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8349 {
8350 BUG_ON((unsigned)n > 0xFF);
8351 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8352 }
8353
8354-static inline void set_system_trap_gate(unsigned int n, void *addr)
8355+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8356 {
8357 BUG_ON((unsigned)n > 0xFF);
8358 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8359 }
8360
8361-static inline void set_trap_gate(unsigned int n, void *addr)
8362+static inline void set_trap_gate(unsigned int n, const void *addr)
8363 {
8364 BUG_ON((unsigned)n > 0xFF);
8365 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8366@@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8367 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8368 {
8369 BUG_ON((unsigned)n > 0xFF);
8370- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8371+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8372 }
8373
8374-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8375+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8376 {
8377 BUG_ON((unsigned)n > 0xFF);
8378 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8379 }
8380
8381-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8382+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8383 {
8384 BUG_ON((unsigned)n > 0xFF);
8385 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8386 }
8387
8388+#ifdef CONFIG_X86_32
8389+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8390+{
8391+ struct desc_struct d;
8392+
8393+ if (likely(limit))
8394+ limit = (limit - 1UL) >> PAGE_SHIFT;
8395+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8396+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8397+}
8398+#endif
8399+
8400 #endif /* _ASM_X86_DESC_H */
8401diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8402index 278441f..b95a174 100644
8403--- a/arch/x86/include/asm/desc_defs.h
8404+++ b/arch/x86/include/asm/desc_defs.h
8405@@ -31,6 +31,12 @@ struct desc_struct {
8406 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8407 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8408 };
8409+ struct {
8410+ u16 offset_low;
8411+ u16 seg;
8412+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8413+ unsigned offset_high: 16;
8414+ } gate;
8415 };
8416 } __attribute__((packed));
8417
8418diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8419index 908b969..a1f4eb4 100644
8420--- a/arch/x86/include/asm/e820.h
8421+++ b/arch/x86/include/asm/e820.h
8422@@ -69,7 +69,7 @@ struct e820map {
8423 #define ISA_START_ADDRESS 0xa0000
8424 #define ISA_END_ADDRESS 0x100000
8425
8426-#define BIOS_BEGIN 0x000a0000
8427+#define BIOS_BEGIN 0x000c0000
8428 #define BIOS_END 0x00100000
8429
8430 #define BIOS_ROM_BASE 0xffe00000
8431diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8432index 5f962df..7289f09 100644
8433--- a/arch/x86/include/asm/elf.h
8434+++ b/arch/x86/include/asm/elf.h
8435@@ -238,7 +238,25 @@ extern int force_personality32;
8436 the loader. We need to make sure that it is out of the way of the program
8437 that it will "exec", and that there is sufficient room for the brk. */
8438
8439+#ifdef CONFIG_PAX_SEGMEXEC
8440+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8441+#else
8442 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8443+#endif
8444+
8445+#ifdef CONFIG_PAX_ASLR
8446+#ifdef CONFIG_X86_32
8447+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8448+
8449+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8450+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8451+#else
8452+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8453+
8454+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8455+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8456+#endif
8457+#endif
8458
8459 /* This yields a mask that user programs can use to figure out what
8460 instruction set this CPU supports. This could be done in user space,
8461@@ -291,9 +309,7 @@ do { \
8462
8463 #define ARCH_DLINFO \
8464 do { \
8465- if (vdso_enabled) \
8466- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8467- (unsigned long)current->mm->context.vdso); \
8468+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8469 } while (0)
8470
8471 #define AT_SYSINFO 32
8472@@ -304,7 +320,7 @@ do { \
8473
8474 #endif /* !CONFIG_X86_32 */
8475
8476-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8477+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8478
8479 #define VDSO_ENTRY \
8480 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8481@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8482 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8483 #define compat_arch_setup_additional_pages syscall32_setup_pages
8484
8485-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8486-#define arch_randomize_brk arch_randomize_brk
8487-
8488 /*
8489 * True on X86_32 or when emulating IA32 on X86_64
8490 */
8491diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8492index cc70c1c..d96d011 100644
8493--- a/arch/x86/include/asm/emergency-restart.h
8494+++ b/arch/x86/include/asm/emergency-restart.h
8495@@ -15,6 +15,6 @@ enum reboot_type {
8496
8497 extern enum reboot_type reboot_type;
8498
8499-extern void machine_emergency_restart(void);
8500+extern void machine_emergency_restart(void) __noreturn;
8501
8502 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8503diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8504index d09bb03..4ea4194 100644
8505--- a/arch/x86/include/asm/futex.h
8506+++ b/arch/x86/include/asm/futex.h
8507@@ -12,16 +12,18 @@
8508 #include <asm/system.h>
8509
8510 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8511+ typecheck(u32 __user *, uaddr); \
8512 asm volatile("1:\t" insn "\n" \
8513 "2:\t.section .fixup,\"ax\"\n" \
8514 "3:\tmov\t%3, %1\n" \
8515 "\tjmp\t2b\n" \
8516 "\t.previous\n" \
8517 _ASM_EXTABLE(1b, 3b) \
8518- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8519+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8520 : "i" (-EFAULT), "0" (oparg), "1" (0))
8521
8522 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8523+ typecheck(u32 __user *, uaddr); \
8524 asm volatile("1:\tmovl %2, %0\n" \
8525 "\tmovl\t%0, %3\n" \
8526 "\t" insn "\n" \
8527@@ -34,7 +36,7 @@
8528 _ASM_EXTABLE(1b, 4b) \
8529 _ASM_EXTABLE(2b, 4b) \
8530 : "=&a" (oldval), "=&r" (ret), \
8531- "+m" (*uaddr), "=&r" (tem) \
8532+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8533 : "r" (oparg), "i" (-EFAULT), "1" (0))
8534
8535 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8536@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8537
8538 switch (op) {
8539 case FUTEX_OP_SET:
8540- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8541+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8542 break;
8543 case FUTEX_OP_ADD:
8544- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8545+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8546 uaddr, oparg);
8547 break;
8548 case FUTEX_OP_OR:
8549@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8550 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8551 return -EFAULT;
8552
8553- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8554+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8555 "2:\t.section .fixup, \"ax\"\n"
8556 "3:\tmov %3, %0\n"
8557 "\tjmp 2b\n"
8558 "\t.previous\n"
8559 _ASM_EXTABLE(1b, 3b)
8560- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8561+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8562 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8563 : "memory"
8564 );
8565diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8566index eb92a6e..b98b2f4 100644
8567--- a/arch/x86/include/asm/hw_irq.h
8568+++ b/arch/x86/include/asm/hw_irq.h
8569@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8570 extern void enable_IO_APIC(void);
8571
8572 /* Statistics */
8573-extern atomic_t irq_err_count;
8574-extern atomic_t irq_mis_count;
8575+extern atomic_unchecked_t irq_err_count;
8576+extern atomic_unchecked_t irq_mis_count;
8577
8578 /* EISA */
8579 extern void eisa_set_level_irq(unsigned int irq);
8580diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8581index c9e09ea..73888df 100644
8582--- a/arch/x86/include/asm/i387.h
8583+++ b/arch/x86/include/asm/i387.h
8584@@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8585 {
8586 int err;
8587
8588+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8589+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8590+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8591+#endif
8592+
8593 /* See comment in fxsave() below. */
8594 #ifdef CONFIG_AS_FXSAVEQ
8595 asm volatile("1: fxrstorq %[fx]\n\t"
8596@@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8597 {
8598 int err;
8599
8600+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8601+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8602+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8603+#endif
8604+
8605 /*
8606 * Clear the bytes not touched by the fxsave and reserved
8607 * for the SW usage.
8608@@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8609 #endif /* CONFIG_X86_64 */
8610
8611 /* We need a safe address that is cheap to find and that is already
8612- in L1 during context switch. The best choices are unfortunately
8613- different for UP and SMP */
8614-#ifdef CONFIG_SMP
8615-#define safe_address (__per_cpu_offset[0])
8616-#else
8617-#define safe_address (kstat_cpu(0).cpustat.user)
8618-#endif
8619+ in L1 during context switch. */
8620+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8621
8622 /*
8623 * These must be called with preempt disabled
8624@@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8625 struct thread_info *me = current_thread_info();
8626 preempt_disable();
8627 if (me->status & TS_USEDFPU)
8628- __save_init_fpu(me->task);
8629+ __save_init_fpu(current);
8630 else
8631 clts();
8632 }
8633diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8634index d8e8eef..99f81ae 100644
8635--- a/arch/x86/include/asm/io.h
8636+++ b/arch/x86/include/asm/io.h
8637@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8638
8639 #include <linux/vmalloc.h>
8640
8641+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8642+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8643+{
8644+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8645+}
8646+
8647+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8648+{
8649+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8650+}
8651+
8652 /*
8653 * Convert a virtual cached pointer to an uncached pointer
8654 */
8655diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8656index bba3cf8..06bc8da 100644
8657--- a/arch/x86/include/asm/irqflags.h
8658+++ b/arch/x86/include/asm/irqflags.h
8659@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8660 sti; \
8661 sysexit
8662
8663+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8664+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8665+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8666+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8667+
8668 #else
8669 #define INTERRUPT_RETURN iret
8670 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8671diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8672index 5478825..839e88c 100644
8673--- a/arch/x86/include/asm/kprobes.h
8674+++ b/arch/x86/include/asm/kprobes.h
8675@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8676 #define RELATIVEJUMP_SIZE 5
8677 #define RELATIVECALL_OPCODE 0xe8
8678 #define RELATIVE_ADDR_SIZE 4
8679-#define MAX_STACK_SIZE 64
8680-#define MIN_STACK_SIZE(ADDR) \
8681- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8682- THREAD_SIZE - (unsigned long)(ADDR))) \
8683- ? (MAX_STACK_SIZE) \
8684- : (((unsigned long)current_thread_info()) + \
8685- THREAD_SIZE - (unsigned long)(ADDR)))
8686+#define MAX_STACK_SIZE 64UL
8687+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8688
8689 #define flush_insn_slot(p) do { } while (0)
8690
8691diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8692index b4973f4..7c4d3fc 100644
8693--- a/arch/x86/include/asm/kvm_host.h
8694+++ b/arch/x86/include/asm/kvm_host.h
8695@@ -459,7 +459,7 @@ struct kvm_arch {
8696 unsigned int n_requested_mmu_pages;
8697 unsigned int n_max_mmu_pages;
8698 unsigned int indirect_shadow_pages;
8699- atomic_t invlpg_counter;
8700+ atomic_unchecked_t invlpg_counter;
8701 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8702 /*
8703 * Hash table of struct kvm_mmu_page.
8704@@ -638,7 +638,7 @@ struct kvm_x86_ops {
8705 int (*check_intercept)(struct kvm_vcpu *vcpu,
8706 struct x86_instruction_info *info,
8707 enum x86_intercept_stage stage);
8708-};
8709+} __do_const;
8710
8711 struct kvm_arch_async_pf {
8712 u32 token;
8713diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8714index 9cdae5d..300d20f 100644
8715--- a/arch/x86/include/asm/local.h
8716+++ b/arch/x86/include/asm/local.h
8717@@ -18,26 +18,58 @@ typedef struct {
8718
8719 static inline void local_inc(local_t *l)
8720 {
8721- asm volatile(_ASM_INC "%0"
8722+ asm volatile(_ASM_INC "%0\n"
8723+
8724+#ifdef CONFIG_PAX_REFCOUNT
8725+ "jno 0f\n"
8726+ _ASM_DEC "%0\n"
8727+ "int $4\n0:\n"
8728+ _ASM_EXTABLE(0b, 0b)
8729+#endif
8730+
8731 : "+m" (l->a.counter));
8732 }
8733
8734 static inline void local_dec(local_t *l)
8735 {
8736- asm volatile(_ASM_DEC "%0"
8737+ asm volatile(_ASM_DEC "%0\n"
8738+
8739+#ifdef CONFIG_PAX_REFCOUNT
8740+ "jno 0f\n"
8741+ _ASM_INC "%0\n"
8742+ "int $4\n0:\n"
8743+ _ASM_EXTABLE(0b, 0b)
8744+#endif
8745+
8746 : "+m" (l->a.counter));
8747 }
8748
8749 static inline void local_add(long i, local_t *l)
8750 {
8751- asm volatile(_ASM_ADD "%1,%0"
8752+ asm volatile(_ASM_ADD "%1,%0\n"
8753+
8754+#ifdef CONFIG_PAX_REFCOUNT
8755+ "jno 0f\n"
8756+ _ASM_SUB "%1,%0\n"
8757+ "int $4\n0:\n"
8758+ _ASM_EXTABLE(0b, 0b)
8759+#endif
8760+
8761 : "+m" (l->a.counter)
8762 : "ir" (i));
8763 }
8764
8765 static inline void local_sub(long i, local_t *l)
8766 {
8767- asm volatile(_ASM_SUB "%1,%0"
8768+ asm volatile(_ASM_SUB "%1,%0\n"
8769+
8770+#ifdef CONFIG_PAX_REFCOUNT
8771+ "jno 0f\n"
8772+ _ASM_ADD "%1,%0\n"
8773+ "int $4\n0:\n"
8774+ _ASM_EXTABLE(0b, 0b)
8775+#endif
8776+
8777 : "+m" (l->a.counter)
8778 : "ir" (i));
8779 }
8780@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8781 {
8782 unsigned char c;
8783
8784- asm volatile(_ASM_SUB "%2,%0; sete %1"
8785+ asm volatile(_ASM_SUB "%2,%0\n"
8786+
8787+#ifdef CONFIG_PAX_REFCOUNT
8788+ "jno 0f\n"
8789+ _ASM_ADD "%2,%0\n"
8790+ "int $4\n0:\n"
8791+ _ASM_EXTABLE(0b, 0b)
8792+#endif
8793+
8794+ "sete %1\n"
8795 : "+m" (l->a.counter), "=qm" (c)
8796 : "ir" (i) : "memory");
8797 return c;
8798@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8799 {
8800 unsigned char c;
8801
8802- asm volatile(_ASM_DEC "%0; sete %1"
8803+ asm volatile(_ASM_DEC "%0\n"
8804+
8805+#ifdef CONFIG_PAX_REFCOUNT
8806+ "jno 0f\n"
8807+ _ASM_INC "%0\n"
8808+ "int $4\n0:\n"
8809+ _ASM_EXTABLE(0b, 0b)
8810+#endif
8811+
8812+ "sete %1\n"
8813 : "+m" (l->a.counter), "=qm" (c)
8814 : : "memory");
8815 return c != 0;
8816@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8817 {
8818 unsigned char c;
8819
8820- asm volatile(_ASM_INC "%0; sete %1"
8821+ asm volatile(_ASM_INC "%0\n"
8822+
8823+#ifdef CONFIG_PAX_REFCOUNT
8824+ "jno 0f\n"
8825+ _ASM_DEC "%0\n"
8826+ "int $4\n0:\n"
8827+ _ASM_EXTABLE(0b, 0b)
8828+#endif
8829+
8830+ "sete %1\n"
8831 : "+m" (l->a.counter), "=qm" (c)
8832 : : "memory");
8833 return c != 0;
8834@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8835 {
8836 unsigned char c;
8837
8838- asm volatile(_ASM_ADD "%2,%0; sets %1"
8839+ asm volatile(_ASM_ADD "%2,%0\n"
8840+
8841+#ifdef CONFIG_PAX_REFCOUNT
8842+ "jno 0f\n"
8843+ _ASM_SUB "%2,%0\n"
8844+ "int $4\n0:\n"
8845+ _ASM_EXTABLE(0b, 0b)
8846+#endif
8847+
8848+ "sets %1\n"
8849 : "+m" (l->a.counter), "=qm" (c)
8850 : "ir" (i) : "memory");
8851 return c;
8852@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8853 #endif
8854 /* Modern 486+ processor */
8855 __i = i;
8856- asm volatile(_ASM_XADD "%0, %1;"
8857+ asm volatile(_ASM_XADD "%0, %1\n"
8858+
8859+#ifdef CONFIG_PAX_REFCOUNT
8860+ "jno 0f\n"
8861+ _ASM_MOV "%0,%1\n"
8862+ "int $4\n0:\n"
8863+ _ASM_EXTABLE(0b, 0b)
8864+#endif
8865+
8866 : "+r" (i), "+m" (l->a.counter)
8867 : : "memory");
8868 return i + __i;
8869diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8870index 593e51d..fa69c9a 100644
8871--- a/arch/x86/include/asm/mman.h
8872+++ b/arch/x86/include/asm/mman.h
8873@@ -5,4 +5,14 @@
8874
8875 #include <asm-generic/mman.h>
8876
8877+#ifdef __KERNEL__
8878+#ifndef __ASSEMBLY__
8879+#ifdef CONFIG_X86_32
8880+#define arch_mmap_check i386_mmap_check
8881+int i386_mmap_check(unsigned long addr, unsigned long len,
8882+ unsigned long flags);
8883+#endif
8884+#endif
8885+#endif
8886+
8887 #endif /* _ASM_X86_MMAN_H */
8888diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8889index 5f55e69..e20bfb1 100644
8890--- a/arch/x86/include/asm/mmu.h
8891+++ b/arch/x86/include/asm/mmu.h
8892@@ -9,7 +9,7 @@
8893 * we put the segment information here.
8894 */
8895 typedef struct {
8896- void *ldt;
8897+ struct desc_struct *ldt;
8898 int size;
8899
8900 #ifdef CONFIG_X86_64
8901@@ -18,7 +18,19 @@ typedef struct {
8902 #endif
8903
8904 struct mutex lock;
8905- void *vdso;
8906+ unsigned long vdso;
8907+
8908+#ifdef CONFIG_X86_32
8909+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8910+ unsigned long user_cs_base;
8911+ unsigned long user_cs_limit;
8912+
8913+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8914+ cpumask_t cpu_user_cs_mask;
8915+#endif
8916+
8917+#endif
8918+#endif
8919 } mm_context_t;
8920
8921 #ifdef CONFIG_SMP
8922diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8923index 6902152..399f3a2 100644
8924--- a/arch/x86/include/asm/mmu_context.h
8925+++ b/arch/x86/include/asm/mmu_context.h
8926@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8927
8928 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8929 {
8930+
8931+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8932+ unsigned int i;
8933+ pgd_t *pgd;
8934+
8935+ pax_open_kernel();
8936+ pgd = get_cpu_pgd(smp_processor_id());
8937+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8938+ set_pgd_batched(pgd+i, native_make_pgd(0));
8939+ pax_close_kernel();
8940+#endif
8941+
8942 #ifdef CONFIG_SMP
8943 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8944 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8945@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8946 struct task_struct *tsk)
8947 {
8948 unsigned cpu = smp_processor_id();
8949+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8950+ int tlbstate = TLBSTATE_OK;
8951+#endif
8952
8953 if (likely(prev != next)) {
8954 #ifdef CONFIG_SMP
8955+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8956+ tlbstate = percpu_read(cpu_tlbstate.state);
8957+#endif
8958 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8959 percpu_write(cpu_tlbstate.active_mm, next);
8960 #endif
8961 cpumask_set_cpu(cpu, mm_cpumask(next));
8962
8963 /* Re-load page tables */
8964+#ifdef CONFIG_PAX_PER_CPU_PGD
8965+ pax_open_kernel();
8966+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8967+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8968+ pax_close_kernel();
8969+ load_cr3(get_cpu_pgd(cpu));
8970+#else
8971 load_cr3(next->pgd);
8972+#endif
8973
8974 /* stop flush ipis for the previous mm */
8975 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8976@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8977 */
8978 if (unlikely(prev->context.ldt != next->context.ldt))
8979 load_LDT_nolock(&next->context);
8980- }
8981+
8982+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8983+ if (!(__supported_pte_mask & _PAGE_NX)) {
8984+ smp_mb__before_clear_bit();
8985+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8986+ smp_mb__after_clear_bit();
8987+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8988+ }
8989+#endif
8990+
8991+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8992+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8993+ prev->context.user_cs_limit != next->context.user_cs_limit))
8994+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8995 #ifdef CONFIG_SMP
8996+ else if (unlikely(tlbstate != TLBSTATE_OK))
8997+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8998+#endif
8999+#endif
9000+
9001+ }
9002 else {
9003+
9004+#ifdef CONFIG_PAX_PER_CPU_PGD
9005+ pax_open_kernel();
9006+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9007+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9008+ pax_close_kernel();
9009+ load_cr3(get_cpu_pgd(cpu));
9010+#endif
9011+
9012+#ifdef CONFIG_SMP
9013 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9014 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9015
9016@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9017 * tlb flush IPI delivery. We must reload CR3
9018 * to make sure to use no freed page tables.
9019 */
9020+
9021+#ifndef CONFIG_PAX_PER_CPU_PGD
9022 load_cr3(next->pgd);
9023+#endif
9024+
9025 load_LDT_nolock(&next->context);
9026+
9027+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9028+ if (!(__supported_pte_mask & _PAGE_NX))
9029+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9030+#endif
9031+
9032+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9033+#ifdef CONFIG_PAX_PAGEEXEC
9034+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
9035+#endif
9036+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9037+#endif
9038+
9039 }
9040+#endif
9041 }
9042-#endif
9043 }
9044
9045 #define activate_mm(prev, next) \
9046diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
9047index 9eae775..c914fea 100644
9048--- a/arch/x86/include/asm/module.h
9049+++ b/arch/x86/include/asm/module.h
9050@@ -5,6 +5,7 @@
9051
9052 #ifdef CONFIG_X86_64
9053 /* X86_64 does not define MODULE_PROC_FAMILY */
9054+#define MODULE_PROC_FAMILY ""
9055 #elif defined CONFIG_M386
9056 #define MODULE_PROC_FAMILY "386 "
9057 #elif defined CONFIG_M486
9058@@ -59,8 +60,20 @@
9059 #error unknown processor family
9060 #endif
9061
9062-#ifdef CONFIG_X86_32
9063-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
9064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9065+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
9066+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
9067+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
9068+#else
9069+#define MODULE_PAX_KERNEXEC ""
9070 #endif
9071
9072+#ifdef CONFIG_PAX_MEMORY_UDEREF
9073+#define MODULE_PAX_UDEREF "UDEREF "
9074+#else
9075+#define MODULE_PAX_UDEREF ""
9076+#endif
9077+
9078+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
9079+
9080 #endif /* _ASM_X86_MODULE_H */
9081diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
9082index 7639dbf..e08a58c 100644
9083--- a/arch/x86/include/asm/page_64_types.h
9084+++ b/arch/x86/include/asm/page_64_types.h
9085@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9086
9087 /* duplicated to the one in bootmem.h */
9088 extern unsigned long max_pfn;
9089-extern unsigned long phys_base;
9090+extern const unsigned long phys_base;
9091
9092 extern unsigned long __phys_addr(unsigned long);
9093 #define __phys_reloc_hide(x) (x)
9094diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
9095index a7d2db9..edb023e 100644
9096--- a/arch/x86/include/asm/paravirt.h
9097+++ b/arch/x86/include/asm/paravirt.h
9098@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
9099 val);
9100 }
9101
9102+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9103+{
9104+ pgdval_t val = native_pgd_val(pgd);
9105+
9106+ if (sizeof(pgdval_t) > sizeof(long))
9107+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9108+ val, (u64)val >> 32);
9109+ else
9110+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9111+ val);
9112+}
9113+
9114 static inline void pgd_clear(pgd_t *pgdp)
9115 {
9116 set_pgd(pgdp, __pgd(0));
9117@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9118 pv_mmu_ops.set_fixmap(idx, phys, flags);
9119 }
9120
9121+#ifdef CONFIG_PAX_KERNEXEC
9122+static inline unsigned long pax_open_kernel(void)
9123+{
9124+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9125+}
9126+
9127+static inline unsigned long pax_close_kernel(void)
9128+{
9129+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9130+}
9131+#else
9132+static inline unsigned long pax_open_kernel(void) { return 0; }
9133+static inline unsigned long pax_close_kernel(void) { return 0; }
9134+#endif
9135+
9136 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9137
9138 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9139@@ -964,7 +991,7 @@ extern void default_banner(void);
9140
9141 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9142 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9143-#define PARA_INDIRECT(addr) *%cs:addr
9144+#define PARA_INDIRECT(addr) *%ss:addr
9145 #endif
9146
9147 #define INTERRUPT_RETURN \
9148@@ -1041,6 +1068,21 @@ extern void default_banner(void);
9149 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9150 CLBR_NONE, \
9151 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9152+
9153+#define GET_CR0_INTO_RDI \
9154+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9155+ mov %rax,%rdi
9156+
9157+#define SET_RDI_INTO_CR0 \
9158+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9159+
9160+#define GET_CR3_INTO_RDI \
9161+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9162+ mov %rax,%rdi
9163+
9164+#define SET_RDI_INTO_CR3 \
9165+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9166+
9167 #endif /* CONFIG_X86_32 */
9168
9169 #endif /* __ASSEMBLY__ */
9170diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9171index 8e8b9a4..f07d725 100644
9172--- a/arch/x86/include/asm/paravirt_types.h
9173+++ b/arch/x86/include/asm/paravirt_types.h
9174@@ -84,20 +84,20 @@ struct pv_init_ops {
9175 */
9176 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9177 unsigned long addr, unsigned len);
9178-};
9179+} __no_const;
9180
9181
9182 struct pv_lazy_ops {
9183 /* Set deferred update mode, used for batching operations. */
9184 void (*enter)(void);
9185 void (*leave)(void);
9186-};
9187+} __no_const;
9188
9189 struct pv_time_ops {
9190 unsigned long long (*sched_clock)(void);
9191 unsigned long long (*steal_clock)(int cpu);
9192 unsigned long (*get_tsc_khz)(void);
9193-};
9194+} __no_const;
9195
9196 struct pv_cpu_ops {
9197 /* hooks for various privileged instructions */
9198@@ -193,7 +193,7 @@ struct pv_cpu_ops {
9199
9200 void (*start_context_switch)(struct task_struct *prev);
9201 void (*end_context_switch)(struct task_struct *next);
9202-};
9203+} __no_const;
9204
9205 struct pv_irq_ops {
9206 /*
9207@@ -224,7 +224,7 @@ struct pv_apic_ops {
9208 unsigned long start_eip,
9209 unsigned long start_esp);
9210 #endif
9211-};
9212+} __no_const;
9213
9214 struct pv_mmu_ops {
9215 unsigned long (*read_cr2)(void);
9216@@ -313,6 +313,7 @@ struct pv_mmu_ops {
9217 struct paravirt_callee_save make_pud;
9218
9219 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9220+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9221 #endif /* PAGETABLE_LEVELS == 4 */
9222 #endif /* PAGETABLE_LEVELS >= 3 */
9223
9224@@ -324,6 +325,12 @@ struct pv_mmu_ops {
9225 an mfn. We can tell which is which from the index. */
9226 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9227 phys_addr_t phys, pgprot_t flags);
9228+
9229+#ifdef CONFIG_PAX_KERNEXEC
9230+ unsigned long (*pax_open_kernel)(void);
9231+ unsigned long (*pax_close_kernel)(void);
9232+#endif
9233+
9234 };
9235
9236 struct arch_spinlock;
9237@@ -334,7 +341,7 @@ struct pv_lock_ops {
9238 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9239 int (*spin_trylock)(struct arch_spinlock *lock);
9240 void (*spin_unlock)(struct arch_spinlock *lock);
9241-};
9242+} __no_const;
9243
9244 /* This contains all the paravirt structures: we get a convenient
9245 * number for each function using the offset which we use to indicate
9246diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9247index b4389a4..b7ff22c 100644
9248--- a/arch/x86/include/asm/pgalloc.h
9249+++ b/arch/x86/include/asm/pgalloc.h
9250@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9251 pmd_t *pmd, pte_t *pte)
9252 {
9253 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9254+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9255+}
9256+
9257+static inline void pmd_populate_user(struct mm_struct *mm,
9258+ pmd_t *pmd, pte_t *pte)
9259+{
9260+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9261 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9262 }
9263
9264diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9265index 98391db..8f6984e 100644
9266--- a/arch/x86/include/asm/pgtable-2level.h
9267+++ b/arch/x86/include/asm/pgtable-2level.h
9268@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9269
9270 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9271 {
9272+ pax_open_kernel();
9273 *pmdp = pmd;
9274+ pax_close_kernel();
9275 }
9276
9277 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9278diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9279index effff47..f9e4035 100644
9280--- a/arch/x86/include/asm/pgtable-3level.h
9281+++ b/arch/x86/include/asm/pgtable-3level.h
9282@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9283
9284 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9285 {
9286+ pax_open_kernel();
9287 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9288+ pax_close_kernel();
9289 }
9290
9291 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9292 {
9293+ pax_open_kernel();
9294 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9295+ pax_close_kernel();
9296 }
9297
9298 /*
9299diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9300index 18601c8..3d716d1 100644
9301--- a/arch/x86/include/asm/pgtable.h
9302+++ b/arch/x86/include/asm/pgtable.h
9303@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9304
9305 #ifndef __PAGETABLE_PUD_FOLDED
9306 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9307+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9308 #define pgd_clear(pgd) native_pgd_clear(pgd)
9309 #endif
9310
9311@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9312
9313 #define arch_end_context_switch(prev) do {} while(0)
9314
9315+#define pax_open_kernel() native_pax_open_kernel()
9316+#define pax_close_kernel() native_pax_close_kernel()
9317 #endif /* CONFIG_PARAVIRT */
9318
9319+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9320+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9321+
9322+#ifdef CONFIG_PAX_KERNEXEC
9323+static inline unsigned long native_pax_open_kernel(void)
9324+{
9325+ unsigned long cr0;
9326+
9327+ preempt_disable();
9328+ barrier();
9329+ cr0 = read_cr0() ^ X86_CR0_WP;
9330+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9331+ write_cr0(cr0);
9332+ return cr0 ^ X86_CR0_WP;
9333+}
9334+
9335+static inline unsigned long native_pax_close_kernel(void)
9336+{
9337+ unsigned long cr0;
9338+
9339+ cr0 = read_cr0() ^ X86_CR0_WP;
9340+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9341+ write_cr0(cr0);
9342+ barrier();
9343+ preempt_enable_no_resched();
9344+ return cr0 ^ X86_CR0_WP;
9345+}
9346+#else
9347+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9348+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9349+#endif
9350+
9351 /*
9352 * The following only work if pte_present() is true.
9353 * Undefined behaviour if not..
9354 */
9355+static inline int pte_user(pte_t pte)
9356+{
9357+ return pte_val(pte) & _PAGE_USER;
9358+}
9359+
9360 static inline int pte_dirty(pte_t pte)
9361 {
9362 return pte_flags(pte) & _PAGE_DIRTY;
9363@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9364 return pte_clear_flags(pte, _PAGE_RW);
9365 }
9366
9367+static inline pte_t pte_mkread(pte_t pte)
9368+{
9369+ return __pte(pte_val(pte) | _PAGE_USER);
9370+}
9371+
9372 static inline pte_t pte_mkexec(pte_t pte)
9373 {
9374- return pte_clear_flags(pte, _PAGE_NX);
9375+#ifdef CONFIG_X86_PAE
9376+ if (__supported_pte_mask & _PAGE_NX)
9377+ return pte_clear_flags(pte, _PAGE_NX);
9378+ else
9379+#endif
9380+ return pte_set_flags(pte, _PAGE_USER);
9381+}
9382+
9383+static inline pte_t pte_exprotect(pte_t pte)
9384+{
9385+#ifdef CONFIG_X86_PAE
9386+ if (__supported_pte_mask & _PAGE_NX)
9387+ return pte_set_flags(pte, _PAGE_NX);
9388+ else
9389+#endif
9390+ return pte_clear_flags(pte, _PAGE_USER);
9391 }
9392
9393 static inline pte_t pte_mkdirty(pte_t pte)
9394@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9395 #endif
9396
9397 #ifndef __ASSEMBLY__
9398+
9399+#ifdef CONFIG_PAX_PER_CPU_PGD
9400+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9401+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9402+{
9403+ return cpu_pgd[cpu];
9404+}
9405+#endif
9406+
9407 #include <linux/mm_types.h>
9408
9409 static inline int pte_none(pte_t pte)
9410@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9411
9412 static inline int pgd_bad(pgd_t pgd)
9413 {
9414- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9415+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9416 }
9417
9418 static inline int pgd_none(pgd_t pgd)
9419@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9420 * pgd_offset() returns a (pgd_t *)
9421 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9422 */
9423-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9424+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9425+
9426+#ifdef CONFIG_PAX_PER_CPU_PGD
9427+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9428+#endif
9429+
9430 /*
9431 * a shortcut which implies the use of the kernel's pgd, instead
9432 * of a process's
9433@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9434 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9435 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9436
9437+#ifdef CONFIG_X86_32
9438+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9439+#else
9440+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9441+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9442+
9443+#ifdef CONFIG_PAX_MEMORY_UDEREF
9444+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9445+#else
9446+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9447+#endif
9448+
9449+#endif
9450+
9451 #ifndef __ASSEMBLY__
9452
9453 extern int direct_gbpages;
9454@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9455 * dst and src can be on the same page, but the range must not overlap,
9456 * and must not cross a page boundary.
9457 */
9458-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9459+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9460 {
9461- memcpy(dst, src, count * sizeof(pgd_t));
9462+ pax_open_kernel();
9463+ while (count--)
9464+ *dst++ = *src++;
9465+ pax_close_kernel();
9466 }
9467
9468+#ifdef CONFIG_PAX_PER_CPU_PGD
9469+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9470+#endif
9471+
9472+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9473+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9474+#else
9475+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9476+#endif
9477
9478 #include <asm-generic/pgtable.h>
9479 #endif /* __ASSEMBLY__ */
9480diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9481index 0c92113..34a77c6 100644
9482--- a/arch/x86/include/asm/pgtable_32.h
9483+++ b/arch/x86/include/asm/pgtable_32.h
9484@@ -25,9 +25,6 @@
9485 struct mm_struct;
9486 struct vm_area_struct;
9487
9488-extern pgd_t swapper_pg_dir[1024];
9489-extern pgd_t initial_page_table[1024];
9490-
9491 static inline void pgtable_cache_init(void) { }
9492 static inline void check_pgt_cache(void) { }
9493 void paging_init(void);
9494@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9495 # include <asm/pgtable-2level.h>
9496 #endif
9497
9498+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9499+extern pgd_t initial_page_table[PTRS_PER_PGD];
9500+#ifdef CONFIG_X86_PAE
9501+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9502+#endif
9503+
9504 #if defined(CONFIG_HIGHPTE)
9505 #define pte_offset_map(dir, address) \
9506 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9507@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9508 /* Clear a kernel PTE and flush it from the TLB */
9509 #define kpte_clear_flush(ptep, vaddr) \
9510 do { \
9511+ pax_open_kernel(); \
9512 pte_clear(&init_mm, (vaddr), (ptep)); \
9513+ pax_close_kernel(); \
9514 __flush_tlb_one((vaddr)); \
9515 } while (0)
9516
9517@@ -74,6 +79,9 @@ do { \
9518
9519 #endif /* !__ASSEMBLY__ */
9520
9521+#define HAVE_ARCH_UNMAPPED_AREA
9522+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9523+
9524 /*
9525 * kern_addr_valid() is (1) for FLATMEM and (0) for
9526 * SPARSEMEM and DISCONTIGMEM
9527diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9528index ed5903b..c7fe163 100644
9529--- a/arch/x86/include/asm/pgtable_32_types.h
9530+++ b/arch/x86/include/asm/pgtable_32_types.h
9531@@ -8,7 +8,7 @@
9532 */
9533 #ifdef CONFIG_X86_PAE
9534 # include <asm/pgtable-3level_types.h>
9535-# define PMD_SIZE (1UL << PMD_SHIFT)
9536+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9537 # define PMD_MASK (~(PMD_SIZE - 1))
9538 #else
9539 # include <asm/pgtable-2level_types.h>
9540@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9541 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9542 #endif
9543
9544+#ifdef CONFIG_PAX_KERNEXEC
9545+#ifndef __ASSEMBLY__
9546+extern unsigned char MODULES_EXEC_VADDR[];
9547+extern unsigned char MODULES_EXEC_END[];
9548+#endif
9549+#include <asm/boot.h>
9550+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9551+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9552+#else
9553+#define ktla_ktva(addr) (addr)
9554+#define ktva_ktla(addr) (addr)
9555+#endif
9556+
9557 #define MODULES_VADDR VMALLOC_START
9558 #define MODULES_END VMALLOC_END
9559 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9560diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9561index 975f709..107976d 100644
9562--- a/arch/x86/include/asm/pgtable_64.h
9563+++ b/arch/x86/include/asm/pgtable_64.h
9564@@ -16,10 +16,14 @@
9565
9566 extern pud_t level3_kernel_pgt[512];
9567 extern pud_t level3_ident_pgt[512];
9568+extern pud_t level3_vmalloc_start_pgt[512];
9569+extern pud_t level3_vmalloc_end_pgt[512];
9570+extern pud_t level3_vmemmap_pgt[512];
9571+extern pud_t level2_vmemmap_pgt[512];
9572 extern pmd_t level2_kernel_pgt[512];
9573 extern pmd_t level2_fixmap_pgt[512];
9574-extern pmd_t level2_ident_pgt[512];
9575-extern pgd_t init_level4_pgt[];
9576+extern pmd_t level2_ident_pgt[512*2];
9577+extern pgd_t init_level4_pgt[512];
9578
9579 #define swapper_pg_dir init_level4_pgt
9580
9581@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9582
9583 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9584 {
9585+ pax_open_kernel();
9586 *pmdp = pmd;
9587+ pax_close_kernel();
9588 }
9589
9590 static inline void native_pmd_clear(pmd_t *pmd)
9591@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9592
9593 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9594 {
9595+ pax_open_kernel();
9596+ *pgdp = pgd;
9597+ pax_close_kernel();
9598+}
9599+
9600+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9601+{
9602 *pgdp = pgd;
9603 }
9604
9605diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9606index 766ea16..5b96cb3 100644
9607--- a/arch/x86/include/asm/pgtable_64_types.h
9608+++ b/arch/x86/include/asm/pgtable_64_types.h
9609@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9610 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9611 #define MODULES_END _AC(0xffffffffff000000, UL)
9612 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9613+#define MODULES_EXEC_VADDR MODULES_VADDR
9614+#define MODULES_EXEC_END MODULES_END
9615+
9616+#define ktla_ktva(addr) (addr)
9617+#define ktva_ktla(addr) (addr)
9618
9619 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9620diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9621index 013286a..8b42f4f 100644
9622--- a/arch/x86/include/asm/pgtable_types.h
9623+++ b/arch/x86/include/asm/pgtable_types.h
9624@@ -16,13 +16,12 @@
9625 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9626 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9627 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9628-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9629+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9630 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9631 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9632 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9633-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9634-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9635-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9636+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9637+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9638 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9639
9640 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9641@@ -40,7 +39,6 @@
9642 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9643 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9644 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9645-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9646 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9647 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9648 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9649@@ -57,8 +55,10 @@
9650
9651 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9652 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9653-#else
9654+#elif defined(CONFIG_KMEMCHECK)
9655 #define _PAGE_NX (_AT(pteval_t, 0))
9656+#else
9657+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9658 #endif
9659
9660 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9661@@ -96,6 +96,9 @@
9662 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9663 _PAGE_ACCESSED)
9664
9665+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9666+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9667+
9668 #define __PAGE_KERNEL_EXEC \
9669 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9670 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9671@@ -106,7 +109,7 @@
9672 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9673 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9674 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9675-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9676+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9677 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9678 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9679 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9680@@ -168,8 +171,8 @@
9681 * bits are combined, this will alow user to access the high address mapped
9682 * VDSO in the presence of CONFIG_COMPAT_VDSO
9683 */
9684-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9685-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9686+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9687+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9688 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9689 #endif
9690
9691@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9692 {
9693 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9694 }
9695+#endif
9696
9697+#if PAGETABLE_LEVELS == 3
9698+#include <asm-generic/pgtable-nopud.h>
9699+#endif
9700+
9701+#if PAGETABLE_LEVELS == 2
9702+#include <asm-generic/pgtable-nopmd.h>
9703+#endif
9704+
9705+#ifndef __ASSEMBLY__
9706 #if PAGETABLE_LEVELS > 3
9707 typedef struct { pudval_t pud; } pud_t;
9708
9709@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9710 return pud.pud;
9711 }
9712 #else
9713-#include <asm-generic/pgtable-nopud.h>
9714-
9715 static inline pudval_t native_pud_val(pud_t pud)
9716 {
9717 return native_pgd_val(pud.pgd);
9718@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9719 return pmd.pmd;
9720 }
9721 #else
9722-#include <asm-generic/pgtable-nopmd.h>
9723-
9724 static inline pmdval_t native_pmd_val(pmd_t pmd)
9725 {
9726 return native_pgd_val(pmd.pud.pgd);
9727@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9728
9729 extern pteval_t __supported_pte_mask;
9730 extern void set_nx(void);
9731-extern int nx_enabled;
9732
9733 #define pgprot_writecombine pgprot_writecombine
9734 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9735diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9736index b650435..eefa566 100644
9737--- a/arch/x86/include/asm/processor.h
9738+++ b/arch/x86/include/asm/processor.h
9739@@ -268,7 +268,7 @@ struct tss_struct {
9740
9741 } ____cacheline_aligned;
9742
9743-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9744+extern struct tss_struct init_tss[NR_CPUS];
9745
9746 /*
9747 * Save the original ist values for checking stack pointers during debugging
9748@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
9749 */
9750 #define TASK_SIZE PAGE_OFFSET
9751 #define TASK_SIZE_MAX TASK_SIZE
9752+
9753+#ifdef CONFIG_PAX_SEGMEXEC
9754+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9755+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9756+#else
9757 #define STACK_TOP TASK_SIZE
9758-#define STACK_TOP_MAX STACK_TOP
9759+#endif
9760+
9761+#define STACK_TOP_MAX TASK_SIZE
9762
9763 #define INIT_THREAD { \
9764- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9765+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9766 .vm86_info = NULL, \
9767 .sysenter_cs = __KERNEL_CS, \
9768 .io_bitmap_ptr = NULL, \
9769@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
9770 */
9771 #define INIT_TSS { \
9772 .x86_tss = { \
9773- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9774+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9775 .ss0 = __KERNEL_DS, \
9776 .ss1 = __KERNEL_CS, \
9777 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9778@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
9779 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9780
9781 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9782-#define KSTK_TOP(info) \
9783-({ \
9784- unsigned long *__ptr = (unsigned long *)(info); \
9785- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9786-})
9787+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9788
9789 /*
9790 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9791@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9792 #define task_pt_regs(task) \
9793 ({ \
9794 struct pt_regs *__regs__; \
9795- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9796+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9797 __regs__ - 1; \
9798 })
9799
9800@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9801 /*
9802 * User space process size. 47bits minus one guard page.
9803 */
9804-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9805+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9806
9807 /* This decides where the kernel will search for a free chunk of vm
9808 * space during mmap's.
9809 */
9810 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9811- 0xc0000000 : 0xFFFFe000)
9812+ 0xc0000000 : 0xFFFFf000)
9813
9814 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9815 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9816@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9817 #define STACK_TOP_MAX TASK_SIZE_MAX
9818
9819 #define INIT_THREAD { \
9820- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9821+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9822 }
9823
9824 #define INIT_TSS { \
9825- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9826+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9827 }
9828
9829 /*
9830@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9831 */
9832 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9833
9834+#ifdef CONFIG_PAX_SEGMEXEC
9835+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9836+#endif
9837+
9838 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9839
9840 /* Get/set a process' ability to use the timestamp counter instruction */
9841diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9842index 3566454..4bdfb8c 100644
9843--- a/arch/x86/include/asm/ptrace.h
9844+++ b/arch/x86/include/asm/ptrace.h
9845@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9846 }
9847
9848 /*
9849- * user_mode_vm(regs) determines whether a register set came from user mode.
9850+ * user_mode(regs) determines whether a register set came from user mode.
9851 * This is true if V8086 mode was enabled OR if the register set was from
9852 * protected mode with RPL-3 CS value. This tricky test checks that with
9853 * one comparison. Many places in the kernel can bypass this full check
9854- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9855+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9856+ * be used.
9857 */
9858-static inline int user_mode(struct pt_regs *regs)
9859+static inline int user_mode_novm(struct pt_regs *regs)
9860 {
9861 #ifdef CONFIG_X86_32
9862 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9863 #else
9864- return !!(regs->cs & 3);
9865+ return !!(regs->cs & SEGMENT_RPL_MASK);
9866 #endif
9867 }
9868
9869-static inline int user_mode_vm(struct pt_regs *regs)
9870+static inline int user_mode(struct pt_regs *regs)
9871 {
9872 #ifdef CONFIG_X86_32
9873 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9874 USER_RPL;
9875 #else
9876- return user_mode(regs);
9877+ return user_mode_novm(regs);
9878 #endif
9879 }
9880
9881@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9882 #ifdef CONFIG_X86_64
9883 static inline bool user_64bit_mode(struct pt_regs *regs)
9884 {
9885+ unsigned long cs = regs->cs & 0xffff;
9886 #ifndef CONFIG_PARAVIRT
9887 /*
9888 * On non-paravirt systems, this is the only long mode CPL 3
9889 * selector. We do not allow long mode selectors in the LDT.
9890 */
9891- return regs->cs == __USER_CS;
9892+ return cs == __USER_CS;
9893 #else
9894 /* Headers are too twisted for this to go in paravirt.h. */
9895- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9896+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9897 #endif
9898 }
9899 #endif
9900diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9901index 92f29706..a79cbbb 100644
9902--- a/arch/x86/include/asm/reboot.h
9903+++ b/arch/x86/include/asm/reboot.h
9904@@ -6,19 +6,19 @@
9905 struct pt_regs;
9906
9907 struct machine_ops {
9908- void (*restart)(char *cmd);
9909- void (*halt)(void);
9910- void (*power_off)(void);
9911+ void (* __noreturn restart)(char *cmd);
9912+ void (* __noreturn halt)(void);
9913+ void (* __noreturn power_off)(void);
9914 void (*shutdown)(void);
9915 void (*crash_shutdown)(struct pt_regs *);
9916- void (*emergency_restart)(void);
9917-};
9918+ void (* __noreturn emergency_restart)(void);
9919+} __no_const;
9920
9921 extern struct machine_ops machine_ops;
9922
9923 void native_machine_crash_shutdown(struct pt_regs *regs);
9924 void native_machine_shutdown(void);
9925-void machine_real_restart(unsigned int type);
9926+void machine_real_restart(unsigned int type) __noreturn;
9927 /* These must match dispatch_table in reboot_32.S */
9928 #define MRR_BIOS 0
9929 #define MRR_APM 1
9930diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9931index 2dbe4a7..ce1db00 100644
9932--- a/arch/x86/include/asm/rwsem.h
9933+++ b/arch/x86/include/asm/rwsem.h
9934@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9935 {
9936 asm volatile("# beginning down_read\n\t"
9937 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9938+
9939+#ifdef CONFIG_PAX_REFCOUNT
9940+ "jno 0f\n"
9941+ LOCK_PREFIX _ASM_DEC "(%1)\n"
9942+ "int $4\n0:\n"
9943+ _ASM_EXTABLE(0b, 0b)
9944+#endif
9945+
9946 /* adds 0x00000001 */
9947 " jns 1f\n"
9948 " call call_rwsem_down_read_failed\n"
9949@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9950 "1:\n\t"
9951 " mov %1,%2\n\t"
9952 " add %3,%2\n\t"
9953+
9954+#ifdef CONFIG_PAX_REFCOUNT
9955+ "jno 0f\n"
9956+ "sub %3,%2\n"
9957+ "int $4\n0:\n"
9958+ _ASM_EXTABLE(0b, 0b)
9959+#endif
9960+
9961 " jle 2f\n\t"
9962 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9963 " jnz 1b\n\t"
9964@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9965 long tmp;
9966 asm volatile("# beginning down_write\n\t"
9967 LOCK_PREFIX " xadd %1,(%2)\n\t"
9968+
9969+#ifdef CONFIG_PAX_REFCOUNT
9970+ "jno 0f\n"
9971+ "mov %1,(%2)\n"
9972+ "int $4\n0:\n"
9973+ _ASM_EXTABLE(0b, 0b)
9974+#endif
9975+
9976 /* adds 0xffff0001, returns the old value */
9977 " test %1,%1\n\t"
9978 /* was the count 0 before? */
9979@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9980 long tmp;
9981 asm volatile("# beginning __up_read\n\t"
9982 LOCK_PREFIX " xadd %1,(%2)\n\t"
9983+
9984+#ifdef CONFIG_PAX_REFCOUNT
9985+ "jno 0f\n"
9986+ "mov %1,(%2)\n"
9987+ "int $4\n0:\n"
9988+ _ASM_EXTABLE(0b, 0b)
9989+#endif
9990+
9991 /* subtracts 1, returns the old value */
9992 " jns 1f\n\t"
9993 " call call_rwsem_wake\n" /* expects old value in %edx */
9994@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9995 long tmp;
9996 asm volatile("# beginning __up_write\n\t"
9997 LOCK_PREFIX " xadd %1,(%2)\n\t"
9998+
9999+#ifdef CONFIG_PAX_REFCOUNT
10000+ "jno 0f\n"
10001+ "mov %1,(%2)\n"
10002+ "int $4\n0:\n"
10003+ _ASM_EXTABLE(0b, 0b)
10004+#endif
10005+
10006 /* subtracts 0xffff0001, returns the old value */
10007 " jns 1f\n\t"
10008 " call call_rwsem_wake\n" /* expects old value in %edx */
10009@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10010 {
10011 asm volatile("# beginning __downgrade_write\n\t"
10012 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10013+
10014+#ifdef CONFIG_PAX_REFCOUNT
10015+ "jno 0f\n"
10016+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10017+ "int $4\n0:\n"
10018+ _ASM_EXTABLE(0b, 0b)
10019+#endif
10020+
10021 /*
10022 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10023 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10024@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
10025 */
10026 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10027 {
10028- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10029+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10030+
10031+#ifdef CONFIG_PAX_REFCOUNT
10032+ "jno 0f\n"
10033+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10034+ "int $4\n0:\n"
10035+ _ASM_EXTABLE(0b, 0b)
10036+#endif
10037+
10038 : "+m" (sem->count)
10039 : "er" (delta));
10040 }
10041@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
10042 */
10043 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
10044 {
10045- return delta + xadd(&sem->count, delta);
10046+ return delta + xadd_check_overflow(&sem->count, delta);
10047 }
10048
10049 #endif /* __KERNEL__ */
10050diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
10051index 5e64171..f58957e 100644
10052--- a/arch/x86/include/asm/segment.h
10053+++ b/arch/x86/include/asm/segment.h
10054@@ -64,10 +64,15 @@
10055 * 26 - ESPFIX small SS
10056 * 27 - per-cpu [ offset to per-cpu data area ]
10057 * 28 - stack_canary-20 [ for stack protector ]
10058- * 29 - unused
10059- * 30 - unused
10060+ * 29 - PCI BIOS CS
10061+ * 30 - PCI BIOS DS
10062 * 31 - TSS for double fault handler
10063 */
10064+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
10065+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
10066+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
10067+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
10068+
10069 #define GDT_ENTRY_TLS_MIN 6
10070 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
10071
10072@@ -79,6 +84,8 @@
10073
10074 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
10075
10076+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10077+
10078 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
10079
10080 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
10081@@ -104,6 +111,12 @@
10082 #define __KERNEL_STACK_CANARY 0
10083 #endif
10084
10085+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
10086+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10087+
10088+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
10089+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10090+
10091 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10092
10093 /*
10094@@ -141,7 +154,7 @@
10095 */
10096
10097 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10098-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10099+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10100
10101
10102 #else
10103@@ -165,6 +178,8 @@
10104 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10105 #define __USER32_DS __USER_DS
10106
10107+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10108+
10109 #define GDT_ENTRY_TSS 8 /* needs two entries */
10110 #define GDT_ENTRY_LDT 10 /* needs two entries */
10111 #define GDT_ENTRY_TLS_MIN 12
10112@@ -185,6 +200,7 @@
10113 #endif
10114
10115 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10116+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10117 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10118 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10119 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10120diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10121index 73b11bc..d4a3b63 100644
10122--- a/arch/x86/include/asm/smp.h
10123+++ b/arch/x86/include/asm/smp.h
10124@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10125 /* cpus sharing the last level cache: */
10126 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10127 DECLARE_PER_CPU(u16, cpu_llc_id);
10128-DECLARE_PER_CPU(int, cpu_number);
10129+DECLARE_PER_CPU(unsigned int, cpu_number);
10130
10131 static inline struct cpumask *cpu_sibling_mask(int cpu)
10132 {
10133@@ -77,7 +77,7 @@ struct smp_ops {
10134
10135 void (*send_call_func_ipi)(const struct cpumask *mask);
10136 void (*send_call_func_single_ipi)(int cpu);
10137-};
10138+} __no_const;
10139
10140 /* Globals due to paravirt */
10141 extern void set_cpu_sibling_map(int cpu);
10142@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10143 extern int safe_smp_processor_id(void);
10144
10145 #elif defined(CONFIG_X86_64_SMP)
10146-#define raw_smp_processor_id() (percpu_read(cpu_number))
10147-
10148-#define stack_smp_processor_id() \
10149-({ \
10150- struct thread_info *ti; \
10151- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10152- ti->cpu; \
10153-})
10154+#define raw_smp_processor_id() (percpu_read(cpu_number))
10155+#define stack_smp_processor_id() raw_smp_processor_id()
10156 #define safe_smp_processor_id() smp_processor_id()
10157
10158 #endif
10159diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10160index 972c260..43ab1fd 100644
10161--- a/arch/x86/include/asm/spinlock.h
10162+++ b/arch/x86/include/asm/spinlock.h
10163@@ -188,6 +188,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10164 static inline void arch_read_lock(arch_rwlock_t *rw)
10165 {
10166 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10167+
10168+#ifdef CONFIG_PAX_REFCOUNT
10169+ "jno 0f\n"
10170+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10171+ "int $4\n0:\n"
10172+ _ASM_EXTABLE(0b, 0b)
10173+#endif
10174+
10175 "jns 1f\n"
10176 "call __read_lock_failed\n\t"
10177 "1:\n"
10178@@ -197,6 +205,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10179 static inline void arch_write_lock(arch_rwlock_t *rw)
10180 {
10181 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10182+
10183+#ifdef CONFIG_PAX_REFCOUNT
10184+ "jno 0f\n"
10185+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10186+ "int $4\n0:\n"
10187+ _ASM_EXTABLE(0b, 0b)
10188+#endif
10189+
10190 "jz 1f\n"
10191 "call __write_lock_failed\n\t"
10192 "1:\n"
10193@@ -226,13 +242,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10194
10195 static inline void arch_read_unlock(arch_rwlock_t *rw)
10196 {
10197- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10198+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10199+
10200+#ifdef CONFIG_PAX_REFCOUNT
10201+ "jno 0f\n"
10202+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10203+ "int $4\n0:\n"
10204+ _ASM_EXTABLE(0b, 0b)
10205+#endif
10206+
10207 :"+m" (rw->lock) : : "memory");
10208 }
10209
10210 static inline void arch_write_unlock(arch_rwlock_t *rw)
10211 {
10212- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10213+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10214+
10215+#ifdef CONFIG_PAX_REFCOUNT
10216+ "jno 0f\n"
10217+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10218+ "int $4\n0:\n"
10219+ _ASM_EXTABLE(0b, 0b)
10220+#endif
10221+
10222 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10223 }
10224
10225diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10226index 1575177..cb23f52 100644
10227--- a/arch/x86/include/asm/stackprotector.h
10228+++ b/arch/x86/include/asm/stackprotector.h
10229@@ -48,7 +48,7 @@
10230 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10231 */
10232 #define GDT_STACK_CANARY_INIT \
10233- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10234+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10235
10236 /*
10237 * Initialize the stackprotector canary value.
10238@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10239
10240 static inline void load_stack_canary_segment(void)
10241 {
10242-#ifdef CONFIG_X86_32
10243+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10244 asm volatile ("mov %0, %%gs" : : "r" (0));
10245 #endif
10246 }
10247diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10248index 70bbe39..4ae2bd4 100644
10249--- a/arch/x86/include/asm/stacktrace.h
10250+++ b/arch/x86/include/asm/stacktrace.h
10251@@ -11,28 +11,20 @@
10252
10253 extern int kstack_depth_to_print;
10254
10255-struct thread_info;
10256+struct task_struct;
10257 struct stacktrace_ops;
10258
10259-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10260- unsigned long *stack,
10261- unsigned long bp,
10262- const struct stacktrace_ops *ops,
10263- void *data,
10264- unsigned long *end,
10265- int *graph);
10266+typedef unsigned long walk_stack_t(struct task_struct *task,
10267+ void *stack_start,
10268+ unsigned long *stack,
10269+ unsigned long bp,
10270+ const struct stacktrace_ops *ops,
10271+ void *data,
10272+ unsigned long *end,
10273+ int *graph);
10274
10275-extern unsigned long
10276-print_context_stack(struct thread_info *tinfo,
10277- unsigned long *stack, unsigned long bp,
10278- const struct stacktrace_ops *ops, void *data,
10279- unsigned long *end, int *graph);
10280-
10281-extern unsigned long
10282-print_context_stack_bp(struct thread_info *tinfo,
10283- unsigned long *stack, unsigned long bp,
10284- const struct stacktrace_ops *ops, void *data,
10285- unsigned long *end, int *graph);
10286+extern walk_stack_t print_context_stack;
10287+extern walk_stack_t print_context_stack_bp;
10288
10289 /* Generic stack tracer with callbacks */
10290
10291@@ -40,7 +32,7 @@ struct stacktrace_ops {
10292 void (*address)(void *data, unsigned long address, int reliable);
10293 /* On negative return stop dumping */
10294 int (*stack)(void *data, char *name);
10295- walk_stack_t walk_stack;
10296+ walk_stack_t *walk_stack;
10297 };
10298
10299 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10300diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10301index cb23852..2dde194 100644
10302--- a/arch/x86/include/asm/sys_ia32.h
10303+++ b/arch/x86/include/asm/sys_ia32.h
10304@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10305 compat_sigset_t __user *, unsigned int);
10306 asmlinkage long sys32_alarm(unsigned int);
10307
10308-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10309+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10310 asmlinkage long sys32_sysfs(int, u32, u32);
10311
10312 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10313diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10314index 2d2f01c..f985723 100644
10315--- a/arch/x86/include/asm/system.h
10316+++ b/arch/x86/include/asm/system.h
10317@@ -129,7 +129,7 @@ do { \
10318 "call __switch_to\n\t" \
10319 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10320 __switch_canary \
10321- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10322+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10323 "movq %%rax,%%rdi\n\t" \
10324 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10325 "jnz ret_from_fork\n\t" \
10326@@ -140,7 +140,7 @@ do { \
10327 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10328 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10329 [_tif_fork] "i" (_TIF_FORK), \
10330- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10331+ [thread_info] "m" (current_tinfo), \
10332 [current_task] "m" (current_task) \
10333 __switch_canary_iparam \
10334 : "memory", "cc" __EXTRA_CLOBBER)
10335@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10336 {
10337 unsigned long __limit;
10338 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10339- return __limit + 1;
10340+ return __limit;
10341 }
10342
10343 static inline void native_clts(void)
10344@@ -397,13 +397,13 @@ void enable_hlt(void);
10345
10346 void cpu_idle_wait(void);
10347
10348-extern unsigned long arch_align_stack(unsigned long sp);
10349+#define arch_align_stack(x) ((x) & ~0xfUL)
10350 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10351
10352 void default_idle(void);
10353 bool set_pm_idle_to_default(void);
10354
10355-void stop_this_cpu(void *dummy);
10356+void stop_this_cpu(void *dummy) __noreturn;
10357
10358 /*
10359 * Force strict CPU ordering.
10360diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10361index a1fe5c1..ee326d8 100644
10362--- a/arch/x86/include/asm/thread_info.h
10363+++ b/arch/x86/include/asm/thread_info.h
10364@@ -10,6 +10,7 @@
10365 #include <linux/compiler.h>
10366 #include <asm/page.h>
10367 #include <asm/types.h>
10368+#include <asm/percpu.h>
10369
10370 /*
10371 * low level task data that entry.S needs immediate access to
10372@@ -24,7 +25,6 @@ struct exec_domain;
10373 #include <linux/atomic.h>
10374
10375 struct thread_info {
10376- struct task_struct *task; /* main task structure */
10377 struct exec_domain *exec_domain; /* execution domain */
10378 __u32 flags; /* low level flags */
10379 __u32 status; /* thread synchronous flags */
10380@@ -34,18 +34,12 @@ struct thread_info {
10381 mm_segment_t addr_limit;
10382 struct restart_block restart_block;
10383 void __user *sysenter_return;
10384-#ifdef CONFIG_X86_32
10385- unsigned long previous_esp; /* ESP of the previous stack in
10386- case of nested (IRQ) stacks
10387- */
10388- __u8 supervisor_stack[0];
10389-#endif
10390+ unsigned long lowest_stack;
10391 int uaccess_err;
10392 };
10393
10394-#define INIT_THREAD_INFO(tsk) \
10395+#define INIT_THREAD_INFO \
10396 { \
10397- .task = &tsk, \
10398 .exec_domain = &default_exec_domain, \
10399 .flags = 0, \
10400 .cpu = 0, \
10401@@ -56,7 +50,7 @@ struct thread_info {
10402 }, \
10403 }
10404
10405-#define init_thread_info (init_thread_union.thread_info)
10406+#define init_thread_info (init_thread_union.stack)
10407 #define init_stack (init_thread_union.stack)
10408
10409 #else /* !__ASSEMBLY__ */
10410@@ -170,45 +164,40 @@ struct thread_info {
10411 ret; \
10412 })
10413
10414-#ifdef CONFIG_X86_32
10415-
10416-#define STACK_WARN (THREAD_SIZE/8)
10417-/*
10418- * macros/functions for gaining access to the thread information structure
10419- *
10420- * preempt_count needs to be 1 initially, until the scheduler is functional.
10421- */
10422-#ifndef __ASSEMBLY__
10423-
10424-
10425-/* how to get the current stack pointer from C */
10426-register unsigned long current_stack_pointer asm("esp") __used;
10427-
10428-/* how to get the thread information struct from C */
10429-static inline struct thread_info *current_thread_info(void)
10430-{
10431- return (struct thread_info *)
10432- (current_stack_pointer & ~(THREAD_SIZE - 1));
10433-}
10434-
10435-#else /* !__ASSEMBLY__ */
10436-
10437+#ifdef __ASSEMBLY__
10438 /* how to get the thread information struct from ASM */
10439 #define GET_THREAD_INFO(reg) \
10440- movl $-THREAD_SIZE, reg; \
10441- andl %esp, reg
10442+ mov PER_CPU_VAR(current_tinfo), reg
10443
10444 /* use this one if reg already contains %esp */
10445-#define GET_THREAD_INFO_WITH_ESP(reg) \
10446- andl $-THREAD_SIZE, reg
10447+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10448+#else
10449+/* how to get the thread information struct from C */
10450+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10451+
10452+static __always_inline struct thread_info *current_thread_info(void)
10453+{
10454+ return percpu_read_stable(current_tinfo);
10455+}
10456+#endif
10457+
10458+#ifdef CONFIG_X86_32
10459+
10460+#define STACK_WARN (THREAD_SIZE/8)
10461+/*
10462+ * macros/functions for gaining access to the thread information structure
10463+ *
10464+ * preempt_count needs to be 1 initially, until the scheduler is functional.
10465+ */
10466+#ifndef __ASSEMBLY__
10467+
10468+/* how to get the current stack pointer from C */
10469+register unsigned long current_stack_pointer asm("esp") __used;
10470
10471 #endif
10472
10473 #else /* X86_32 */
10474
10475-#include <asm/percpu.h>
10476-#define KERNEL_STACK_OFFSET (5*8)
10477-
10478 /*
10479 * macros/functions for gaining access to the thread information structure
10480 * preempt_count needs to be 1 initially, until the scheduler is functional.
10481@@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10482 #ifndef __ASSEMBLY__
10483 DECLARE_PER_CPU(unsigned long, kernel_stack);
10484
10485-static inline struct thread_info *current_thread_info(void)
10486-{
10487- struct thread_info *ti;
10488- ti = (void *)(percpu_read_stable(kernel_stack) +
10489- KERNEL_STACK_OFFSET - THREAD_SIZE);
10490- return ti;
10491-}
10492-
10493-#else /* !__ASSEMBLY__ */
10494-
10495-/* how to get the thread information struct from ASM */
10496-#define GET_THREAD_INFO(reg) \
10497- movq PER_CPU_VAR(kernel_stack),reg ; \
10498- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10499-
10500+/* how to get the current stack pointer from C */
10501+register unsigned long current_stack_pointer asm("rsp") __used;
10502 #endif
10503
10504 #endif /* !X86_32 */
10505@@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10506 extern void free_thread_info(struct thread_info *ti);
10507 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10508 #define arch_task_cache_init arch_task_cache_init
10509+
10510+#define __HAVE_THREAD_FUNCTIONS
10511+#define task_thread_info(task) (&(task)->tinfo)
10512+#define task_stack_page(task) ((task)->stack)
10513+#define setup_thread_stack(p, org) do {} while (0)
10514+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10515+
10516+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10517+extern struct task_struct *alloc_task_struct_node(int node);
10518+extern void free_task_struct(struct task_struct *);
10519+
10520 #endif
10521 #endif /* _ASM_X86_THREAD_INFO_H */
10522diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10523index 36361bf..324f262 100644
10524--- a/arch/x86/include/asm/uaccess.h
10525+++ b/arch/x86/include/asm/uaccess.h
10526@@ -7,12 +7,15 @@
10527 #include <linux/compiler.h>
10528 #include <linux/thread_info.h>
10529 #include <linux/string.h>
10530+#include <linux/sched.h>
10531 #include <asm/asm.h>
10532 #include <asm/page.h>
10533
10534 #define VERIFY_READ 0
10535 #define VERIFY_WRITE 1
10536
10537+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10538+
10539 /*
10540 * The fs value determines whether argument validity checking should be
10541 * performed or not. If get_fs() == USER_DS, checking is performed, with
10542@@ -28,7 +31,12 @@
10543
10544 #define get_ds() (KERNEL_DS)
10545 #define get_fs() (current_thread_info()->addr_limit)
10546+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10547+void __set_fs(mm_segment_t x);
10548+void set_fs(mm_segment_t x);
10549+#else
10550 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10551+#endif
10552
10553 #define segment_eq(a, b) ((a).seg == (b).seg)
10554
10555@@ -76,7 +84,33 @@
10556 * checks that the pointer is in the user space range - after calling
10557 * this function, memory access functions may still return -EFAULT.
10558 */
10559-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10560+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10561+#define access_ok(type, addr, size) \
10562+({ \
10563+ long __size = size; \
10564+ unsigned long __addr = (unsigned long)addr; \
10565+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10566+ unsigned long __end_ao = __addr + __size - 1; \
10567+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10568+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10569+ while(__addr_ao <= __end_ao) { \
10570+ char __c_ao; \
10571+ __addr_ao += PAGE_SIZE; \
10572+ if (__size > PAGE_SIZE) \
10573+ cond_resched(); \
10574+ if (__get_user(__c_ao, (char __user *)__addr)) \
10575+ break; \
10576+ if (type != VERIFY_WRITE) { \
10577+ __addr = __addr_ao; \
10578+ continue; \
10579+ } \
10580+ if (__put_user(__c_ao, (char __user *)__addr)) \
10581+ break; \
10582+ __addr = __addr_ao; \
10583+ } \
10584+ } \
10585+ __ret_ao; \
10586+})
10587
10588 /*
10589 * The exception table consists of pairs of addresses: the first is the
10590@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10591 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10592 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10593
10594-
10595+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10596+#define __copyuser_seg "gs;"
10597+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10598+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10599+#else
10600+#define __copyuser_seg
10601+#define __COPYUSER_SET_ES
10602+#define __COPYUSER_RESTORE_ES
10603+#endif
10604
10605 #ifdef CONFIG_X86_32
10606 #define __put_user_asm_u64(x, addr, err, errret) \
10607- asm volatile("1: movl %%eax,0(%2)\n" \
10608- "2: movl %%edx,4(%2)\n" \
10609+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10610+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10611 "3:\n" \
10612 ".section .fixup,\"ax\"\n" \
10613 "4: movl %3,%0\n" \
10614@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10615 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10616
10617 #define __put_user_asm_ex_u64(x, addr) \
10618- asm volatile("1: movl %%eax,0(%1)\n" \
10619- "2: movl %%edx,4(%1)\n" \
10620+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10621+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10622 "3:\n" \
10623 _ASM_EXTABLE(1b, 2b - 1b) \
10624 _ASM_EXTABLE(2b, 3b - 2b) \
10625@@ -252,7 +294,7 @@ extern void __put_user_8(void);
10626 __typeof__(*(ptr)) __pu_val; \
10627 __chk_user_ptr(ptr); \
10628 might_fault(); \
10629- __pu_val = x; \
10630+ __pu_val = (x); \
10631 switch (sizeof(*(ptr))) { \
10632 case 1: \
10633 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10634@@ -373,7 +415,7 @@ do { \
10635 } while (0)
10636
10637 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10638- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10639+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10640 "2:\n" \
10641 ".section .fixup,\"ax\"\n" \
10642 "3: mov %3,%0\n" \
10643@@ -381,7 +423,7 @@ do { \
10644 " jmp 2b\n" \
10645 ".previous\n" \
10646 _ASM_EXTABLE(1b, 3b) \
10647- : "=r" (err), ltype(x) \
10648+ : "=r" (err), ltype (x) \
10649 : "m" (__m(addr)), "i" (errret), "0" (err))
10650
10651 #define __get_user_size_ex(x, ptr, size) \
10652@@ -406,7 +448,7 @@ do { \
10653 } while (0)
10654
10655 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10656- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10657+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10658 "2:\n" \
10659 _ASM_EXTABLE(1b, 2b - 1b) \
10660 : ltype(x) : "m" (__m(addr)))
10661@@ -423,13 +465,24 @@ do { \
10662 int __gu_err; \
10663 unsigned long __gu_val; \
10664 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10665- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10666+ (x) = (__typeof__(*(ptr)))__gu_val; \
10667 __gu_err; \
10668 })
10669
10670 /* FIXME: this hack is definitely wrong -AK */
10671 struct __large_struct { unsigned long buf[100]; };
10672-#define __m(x) (*(struct __large_struct __user *)(x))
10673+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10674+#define ____m(x) \
10675+({ \
10676+ unsigned long ____x = (unsigned long)(x); \
10677+ if (____x < PAX_USER_SHADOW_BASE) \
10678+ ____x += PAX_USER_SHADOW_BASE; \
10679+ (void __user *)____x; \
10680+})
10681+#else
10682+#define ____m(x) (x)
10683+#endif
10684+#define __m(x) (*(struct __large_struct __user *)____m(x))
10685
10686 /*
10687 * Tell gcc we read from memory instead of writing: this is because
10688@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10689 * aliasing issues.
10690 */
10691 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10692- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10693+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10694 "2:\n" \
10695 ".section .fixup,\"ax\"\n" \
10696 "3: mov %3,%0\n" \
10697@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10698 ".previous\n" \
10699 _ASM_EXTABLE(1b, 3b) \
10700 : "=r"(err) \
10701- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10702+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10703
10704 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10705- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10706+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10707 "2:\n" \
10708 _ASM_EXTABLE(1b, 2b - 1b) \
10709 : : ltype(x), "m" (__m(addr)))
10710@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10711 * On error, the variable @x is set to zero.
10712 */
10713
10714+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10715+#define __get_user(x, ptr) get_user((x), (ptr))
10716+#else
10717 #define __get_user(x, ptr) \
10718 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10719+#endif
10720
10721 /**
10722 * __put_user: - Write a simple value into user space, with less checking.
10723@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10724 * Returns zero on success, or -EFAULT on error.
10725 */
10726
10727+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10728+#define __put_user(x, ptr) put_user((x), (ptr))
10729+#else
10730 #define __put_user(x, ptr) \
10731 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10732+#endif
10733
10734 #define __get_user_unaligned __get_user
10735 #define __put_user_unaligned __put_user
10736@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10737 #define get_user_ex(x, ptr) do { \
10738 unsigned long __gue_val; \
10739 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10740- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10741+ (x) = (__typeof__(*(ptr)))__gue_val; \
10742 } while (0)
10743
10744 #ifdef CONFIG_X86_WP_WORKS_OK
10745diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10746index 566e803..b9521e9 100644
10747--- a/arch/x86/include/asm/uaccess_32.h
10748+++ b/arch/x86/include/asm/uaccess_32.h
10749@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10750 static __always_inline unsigned long __must_check
10751 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10752 {
10753+ if ((long)n < 0)
10754+ return n;
10755+
10756 if (__builtin_constant_p(n)) {
10757 unsigned long ret;
10758
10759@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10760 return ret;
10761 }
10762 }
10763+ if (!__builtin_constant_p(n))
10764+ check_object_size(from, n, true);
10765 return __copy_to_user_ll(to, from, n);
10766 }
10767
10768@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
10769 __copy_to_user(void __user *to, const void *from, unsigned long n)
10770 {
10771 might_fault();
10772+
10773 return __copy_to_user_inatomic(to, from, n);
10774 }
10775
10776 static __always_inline unsigned long
10777 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10778 {
10779+ if ((long)n < 0)
10780+ return n;
10781+
10782 /* Avoid zeroing the tail if the copy fails..
10783 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10784 * but as the zeroing behaviour is only significant when n is not
10785@@ -137,6 +146,10 @@ static __always_inline unsigned long
10786 __copy_from_user(void *to, const void __user *from, unsigned long n)
10787 {
10788 might_fault();
10789+
10790+ if ((long)n < 0)
10791+ return n;
10792+
10793 if (__builtin_constant_p(n)) {
10794 unsigned long ret;
10795
10796@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10797 return ret;
10798 }
10799 }
10800+ if (!__builtin_constant_p(n))
10801+ check_object_size(to, n, false);
10802 return __copy_from_user_ll(to, from, n);
10803 }
10804
10805@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10806 const void __user *from, unsigned long n)
10807 {
10808 might_fault();
10809+
10810+ if ((long)n < 0)
10811+ return n;
10812+
10813 if (__builtin_constant_p(n)) {
10814 unsigned long ret;
10815
10816@@ -181,15 +200,19 @@ static __always_inline unsigned long
10817 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10818 unsigned long n)
10819 {
10820- return __copy_from_user_ll_nocache_nozero(to, from, n);
10821+ if ((long)n < 0)
10822+ return n;
10823+
10824+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10825 }
10826
10827-unsigned long __must_check copy_to_user(void __user *to,
10828- const void *from, unsigned long n);
10829-unsigned long __must_check _copy_from_user(void *to,
10830- const void __user *from,
10831- unsigned long n);
10832-
10833+extern void copy_to_user_overflow(void)
10834+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10835+ __compiletime_error("copy_to_user() buffer size is not provably correct")
10836+#else
10837+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
10838+#endif
10839+;
10840
10841 extern void copy_from_user_overflow(void)
10842 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10843@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
10844 #endif
10845 ;
10846
10847-static inline unsigned long __must_check copy_from_user(void *to,
10848- const void __user *from,
10849- unsigned long n)
10850+/**
10851+ * copy_to_user: - Copy a block of data into user space.
10852+ * @to: Destination address, in user space.
10853+ * @from: Source address, in kernel space.
10854+ * @n: Number of bytes to copy.
10855+ *
10856+ * Context: User context only. This function may sleep.
10857+ *
10858+ * Copy data from kernel space to user space.
10859+ *
10860+ * Returns number of bytes that could not be copied.
10861+ * On success, this will be zero.
10862+ */
10863+static inline unsigned long __must_check
10864+copy_to_user(void __user *to, const void *from, unsigned long n)
10865+{
10866+ int sz = __compiletime_object_size(from);
10867+
10868+ if (unlikely(sz != -1 && sz < n))
10869+ copy_to_user_overflow();
10870+ else if (access_ok(VERIFY_WRITE, to, n))
10871+ n = __copy_to_user(to, from, n);
10872+ return n;
10873+}
10874+
10875+/**
10876+ * copy_from_user: - Copy a block of data from user space.
10877+ * @to: Destination address, in kernel space.
10878+ * @from: Source address, in user space.
10879+ * @n: Number of bytes to copy.
10880+ *
10881+ * Context: User context only. This function may sleep.
10882+ *
10883+ * Copy data from user space to kernel space.
10884+ *
10885+ * Returns number of bytes that could not be copied.
10886+ * On success, this will be zero.
10887+ *
10888+ * If some data could not be copied, this function will pad the copied
10889+ * data to the requested size using zero bytes.
10890+ */
10891+static inline unsigned long __must_check
10892+copy_from_user(void *to, const void __user *from, unsigned long n)
10893 {
10894 int sz = __compiletime_object_size(to);
10895
10896- if (likely(sz == -1 || sz >= n))
10897- n = _copy_from_user(to, from, n);
10898- else
10899+ if (unlikely(sz != -1 && sz < n))
10900 copy_from_user_overflow();
10901-
10902+ else if (access_ok(VERIFY_READ, from, n))
10903+ n = __copy_from_user(to, from, n);
10904+ else if ((long)n > 0) {
10905+ if (!__builtin_constant_p(n))
10906+ check_object_size(to, n, false);
10907+ memset(to, 0, n);
10908+ }
10909 return n;
10910 }
10911
10912diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10913index 1c66d30..23ab77d 100644
10914--- a/arch/x86/include/asm/uaccess_64.h
10915+++ b/arch/x86/include/asm/uaccess_64.h
10916@@ -10,6 +10,9 @@
10917 #include <asm/alternative.h>
10918 #include <asm/cpufeature.h>
10919 #include <asm/page.h>
10920+#include <asm/pgtable.h>
10921+
10922+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10923
10924 /*
10925 * Copy To/From Userspace
10926@@ -17,12 +20,12 @@
10927
10928 /* Handles exceptions in both to and from, but doesn't do access_ok */
10929 __must_check unsigned long
10930-copy_user_generic_string(void *to, const void *from, unsigned len);
10931+copy_user_generic_string(void *to, const void *from, unsigned long len);
10932 __must_check unsigned long
10933-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10934+copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10935
10936 static __always_inline __must_check unsigned long
10937-copy_user_generic(void *to, const void *from, unsigned len)
10938+copy_user_generic(void *to, const void *from, unsigned long len)
10939 {
10940 unsigned ret;
10941
10942@@ -36,138 +39,222 @@ copy_user_generic(void *to, const void *from, unsigned len)
10943 return ret;
10944 }
10945
10946+static __always_inline __must_check unsigned long
10947+__copy_to_user(void __user *to, const void *from, unsigned long len);
10948+static __always_inline __must_check unsigned long
10949+__copy_from_user(void *to, const void __user *from, unsigned long len);
10950 __must_check unsigned long
10951-_copy_to_user(void __user *to, const void *from, unsigned len);
10952-__must_check unsigned long
10953-_copy_from_user(void *to, const void __user *from, unsigned len);
10954-__must_check unsigned long
10955-copy_in_user(void __user *to, const void __user *from, unsigned len);
10956+copy_in_user(void __user *to, const void __user *from, unsigned long len);
10957
10958 static inline unsigned long __must_check copy_from_user(void *to,
10959 const void __user *from,
10960 unsigned long n)
10961 {
10962- int sz = __compiletime_object_size(to);
10963-
10964 might_fault();
10965- if (likely(sz == -1 || sz >= n))
10966- n = _copy_from_user(to, from, n);
10967-#ifdef CONFIG_DEBUG_VM
10968- else
10969- WARN(1, "Buffer overflow detected!\n");
10970-#endif
10971+
10972+ if (access_ok(VERIFY_READ, from, n))
10973+ n = __copy_from_user(to, from, n);
10974+ else if (n < INT_MAX) {
10975+ if (!__builtin_constant_p(n))
10976+ check_object_size(to, n, false);
10977+ memset(to, 0, n);
10978+ }
10979 return n;
10980 }
10981
10982 static __always_inline __must_check
10983-int copy_to_user(void __user *dst, const void *src, unsigned size)
10984+int copy_to_user(void __user *dst, const void *src, unsigned long size)
10985 {
10986 might_fault();
10987
10988- return _copy_to_user(dst, src, size);
10989+ if (access_ok(VERIFY_WRITE, dst, size))
10990+ size = __copy_to_user(dst, src, size);
10991+ return size;
10992 }
10993
10994 static __always_inline __must_check
10995-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10996+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10997 {
10998- int ret = 0;
10999+ int sz = __compiletime_object_size(dst);
11000+ unsigned ret = 0;
11001
11002 might_fault();
11003- if (!__builtin_constant_p(size))
11004- return copy_user_generic(dst, (__force void *)src, size);
11005+
11006+ if (size > INT_MAX)
11007+ return size;
11008+
11009+#ifdef CONFIG_PAX_MEMORY_UDEREF
11010+ if (!__access_ok(VERIFY_READ, src, size))
11011+ return size;
11012+#endif
11013+
11014+ if (unlikely(sz != -1 && sz < size)) {
11015+#ifdef CONFIG_DEBUG_VM
11016+ WARN(1, "Buffer overflow detected!\n");
11017+#endif
11018+ return size;
11019+ }
11020+
11021+ if (!__builtin_constant_p(size)) {
11022+ check_object_size(dst, size, false);
11023+
11024+#ifdef CONFIG_PAX_MEMORY_UDEREF
11025+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11026+ src += PAX_USER_SHADOW_BASE;
11027+#endif
11028+
11029+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11030+ }
11031 switch (size) {
11032- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
11033+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
11034 ret, "b", "b", "=q", 1);
11035 return ret;
11036- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
11037+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
11038 ret, "w", "w", "=r", 2);
11039 return ret;
11040- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
11041+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
11042 ret, "l", "k", "=r", 4);
11043 return ret;
11044- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
11045+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11046 ret, "q", "", "=r", 8);
11047 return ret;
11048 case 10:
11049- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11050+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11051 ret, "q", "", "=r", 10);
11052 if (unlikely(ret))
11053 return ret;
11054 __get_user_asm(*(u16 *)(8 + (char *)dst),
11055- (u16 __user *)(8 + (char __user *)src),
11056+ (const u16 __user *)(8 + (const char __user *)src),
11057 ret, "w", "w", "=r", 2);
11058 return ret;
11059 case 16:
11060- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
11061+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
11062 ret, "q", "", "=r", 16);
11063 if (unlikely(ret))
11064 return ret;
11065 __get_user_asm(*(u64 *)(8 + (char *)dst),
11066- (u64 __user *)(8 + (char __user *)src),
11067+ (const u64 __user *)(8 + (const char __user *)src),
11068 ret, "q", "", "=r", 8);
11069 return ret;
11070 default:
11071- return copy_user_generic(dst, (__force void *)src, size);
11072+
11073+#ifdef CONFIG_PAX_MEMORY_UDEREF
11074+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11075+ src += PAX_USER_SHADOW_BASE;
11076+#endif
11077+
11078+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11079 }
11080 }
11081
11082 static __always_inline __must_check
11083-int __copy_to_user(void __user *dst, const void *src, unsigned size)
11084+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
11085 {
11086- int ret = 0;
11087+ int sz = __compiletime_object_size(src);
11088+ unsigned ret = 0;
11089
11090 might_fault();
11091- if (!__builtin_constant_p(size))
11092- return copy_user_generic((__force void *)dst, src, size);
11093+
11094+ if (size > INT_MAX)
11095+ return size;
11096+
11097+#ifdef CONFIG_PAX_MEMORY_UDEREF
11098+ if (!__access_ok(VERIFY_WRITE, dst, size))
11099+ return size;
11100+#endif
11101+
11102+ if (unlikely(sz != -1 && sz < size)) {
11103+#ifdef CONFIG_DEBUG_VM
11104+ WARN(1, "Buffer overflow detected!\n");
11105+#endif
11106+ return size;
11107+ }
11108+
11109+ if (!__builtin_constant_p(size)) {
11110+ check_object_size(src, size, true);
11111+
11112+#ifdef CONFIG_PAX_MEMORY_UDEREF
11113+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11114+ dst += PAX_USER_SHADOW_BASE;
11115+#endif
11116+
11117+ return copy_user_generic((__force_kernel void *)dst, src, size);
11118+ }
11119 switch (size) {
11120- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11121+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11122 ret, "b", "b", "iq", 1);
11123 return ret;
11124- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11125+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11126 ret, "w", "w", "ir", 2);
11127 return ret;
11128- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11129+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11130 ret, "l", "k", "ir", 4);
11131 return ret;
11132- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11133+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11134 ret, "q", "", "er", 8);
11135 return ret;
11136 case 10:
11137- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11138+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11139 ret, "q", "", "er", 10);
11140 if (unlikely(ret))
11141 return ret;
11142 asm("":::"memory");
11143- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11144+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11145 ret, "w", "w", "ir", 2);
11146 return ret;
11147 case 16:
11148- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11149+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11150 ret, "q", "", "er", 16);
11151 if (unlikely(ret))
11152 return ret;
11153 asm("":::"memory");
11154- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11155+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11156 ret, "q", "", "er", 8);
11157 return ret;
11158 default:
11159- return copy_user_generic((__force void *)dst, src, size);
11160+
11161+#ifdef CONFIG_PAX_MEMORY_UDEREF
11162+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11163+ dst += PAX_USER_SHADOW_BASE;
11164+#endif
11165+
11166+ return copy_user_generic((__force_kernel void *)dst, src, size);
11167 }
11168 }
11169
11170 static __always_inline __must_check
11171-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11172+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11173 {
11174- int ret = 0;
11175+ unsigned ret = 0;
11176
11177 might_fault();
11178- if (!__builtin_constant_p(size))
11179- return copy_user_generic((__force void *)dst,
11180- (__force void *)src, size);
11181+
11182+ if (size > INT_MAX)
11183+ return size;
11184+
11185+#ifdef CONFIG_PAX_MEMORY_UDEREF
11186+ if (!__access_ok(VERIFY_READ, src, size))
11187+ return size;
11188+ if (!__access_ok(VERIFY_WRITE, dst, size))
11189+ return size;
11190+#endif
11191+
11192+ if (!__builtin_constant_p(size)) {
11193+
11194+#ifdef CONFIG_PAX_MEMORY_UDEREF
11195+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11196+ src += PAX_USER_SHADOW_BASE;
11197+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11198+ dst += PAX_USER_SHADOW_BASE;
11199+#endif
11200+
11201+ return copy_user_generic((__force_kernel void *)dst,
11202+ (__force_kernel const void *)src, size);
11203+ }
11204 switch (size) {
11205 case 1: {
11206 u8 tmp;
11207- __get_user_asm(tmp, (u8 __user *)src,
11208+ __get_user_asm(tmp, (const u8 __user *)src,
11209 ret, "b", "b", "=q", 1);
11210 if (likely(!ret))
11211 __put_user_asm(tmp, (u8 __user *)dst,
11212@@ -176,7 +263,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11213 }
11214 case 2: {
11215 u16 tmp;
11216- __get_user_asm(tmp, (u16 __user *)src,
11217+ __get_user_asm(tmp, (const u16 __user *)src,
11218 ret, "w", "w", "=r", 2);
11219 if (likely(!ret))
11220 __put_user_asm(tmp, (u16 __user *)dst,
11221@@ -186,7 +273,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11222
11223 case 4: {
11224 u32 tmp;
11225- __get_user_asm(tmp, (u32 __user *)src,
11226+ __get_user_asm(tmp, (const u32 __user *)src,
11227 ret, "l", "k", "=r", 4);
11228 if (likely(!ret))
11229 __put_user_asm(tmp, (u32 __user *)dst,
11230@@ -195,7 +282,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11231 }
11232 case 8: {
11233 u64 tmp;
11234- __get_user_asm(tmp, (u64 __user *)src,
11235+ __get_user_asm(tmp, (const u64 __user *)src,
11236 ret, "q", "", "=r", 8);
11237 if (likely(!ret))
11238 __put_user_asm(tmp, (u64 __user *)dst,
11239@@ -203,8 +290,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11240 return ret;
11241 }
11242 default:
11243- return copy_user_generic((__force void *)dst,
11244- (__force void *)src, size);
11245+
11246+#ifdef CONFIG_PAX_MEMORY_UDEREF
11247+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11248+ src += PAX_USER_SHADOW_BASE;
11249+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11250+ dst += PAX_USER_SHADOW_BASE;
11251+#endif
11252+
11253+ return copy_user_generic((__force_kernel void *)dst,
11254+ (__force_kernel const void *)src, size);
11255 }
11256 }
11257
11258@@ -219,35 +314,72 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11259 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11260
11261 static __must_check __always_inline int
11262-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11263+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11264 {
11265- return copy_user_generic(dst, (__force const void *)src, size);
11266+ if (size > INT_MAX)
11267+ return size;
11268+
11269+#ifdef CONFIG_PAX_MEMORY_UDEREF
11270+ if (!__access_ok(VERIFY_READ, src, size))
11271+ return size;
11272+
11273+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11274+ src += PAX_USER_SHADOW_BASE;
11275+#endif
11276+
11277+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
11278 }
11279
11280-static __must_check __always_inline int
11281-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11282+static __must_check __always_inline unsigned long
11283+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11284 {
11285- return copy_user_generic((__force void *)dst, src, size);
11286+ if (size > INT_MAX)
11287+ return size;
11288+
11289+#ifdef CONFIG_PAX_MEMORY_UDEREF
11290+ if (!__access_ok(VERIFY_WRITE, dst, size))
11291+ return size;
11292+
11293+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11294+ dst += PAX_USER_SHADOW_BASE;
11295+#endif
11296+
11297+ return copy_user_generic((__force_kernel void *)dst, src, size);
11298 }
11299
11300-extern long __copy_user_nocache(void *dst, const void __user *src,
11301- unsigned size, int zerorest);
11302+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11303+ unsigned long size, int zerorest);
11304
11305-static inline int
11306-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11307+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11308 {
11309 might_sleep();
11310+
11311+ if (size > INT_MAX)
11312+ return size;
11313+
11314+#ifdef CONFIG_PAX_MEMORY_UDEREF
11315+ if (!__access_ok(VERIFY_READ, src, size))
11316+ return size;
11317+#endif
11318+
11319 return __copy_user_nocache(dst, src, size, 1);
11320 }
11321
11322-static inline int
11323-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11324- unsigned size)
11325+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11326+ unsigned long size)
11327 {
11328+ if (size > INT_MAX)
11329+ return size;
11330+
11331+#ifdef CONFIG_PAX_MEMORY_UDEREF
11332+ if (!__access_ok(VERIFY_READ, src, size))
11333+ return size;
11334+#endif
11335+
11336 return __copy_user_nocache(dst, src, size, 0);
11337 }
11338
11339-unsigned long
11340-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11341+extern unsigned long
11342+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11343
11344 #endif /* _ASM_X86_UACCESS_64_H */
11345diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11346index bb05228..d763d5b 100644
11347--- a/arch/x86/include/asm/vdso.h
11348+++ b/arch/x86/include/asm/vdso.h
11349@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11350 #define VDSO32_SYMBOL(base, name) \
11351 ({ \
11352 extern const char VDSO32_##name[]; \
11353- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11354+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11355 })
11356 #endif
11357
11358diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11359index 1971e65..1e3559b 100644
11360--- a/arch/x86/include/asm/x86_init.h
11361+++ b/arch/x86/include/asm/x86_init.h
11362@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11363 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11364 void (*find_smp_config)(void);
11365 void (*get_smp_config)(unsigned int early);
11366-};
11367+} __no_const;
11368
11369 /**
11370 * struct x86_init_resources - platform specific resource related ops
11371@@ -42,7 +42,7 @@ struct x86_init_resources {
11372 void (*probe_roms)(void);
11373 void (*reserve_resources)(void);
11374 char *(*memory_setup)(void);
11375-};
11376+} __no_const;
11377
11378 /**
11379 * struct x86_init_irqs - platform specific interrupt setup
11380@@ -55,7 +55,7 @@ struct x86_init_irqs {
11381 void (*pre_vector_init)(void);
11382 void (*intr_init)(void);
11383 void (*trap_init)(void);
11384-};
11385+} __no_const;
11386
11387 /**
11388 * struct x86_init_oem - oem platform specific customizing functions
11389@@ -65,7 +65,7 @@ struct x86_init_irqs {
11390 struct x86_init_oem {
11391 void (*arch_setup)(void);
11392 void (*banner)(void);
11393-};
11394+} __no_const;
11395
11396 /**
11397 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11398@@ -76,7 +76,7 @@ struct x86_init_oem {
11399 */
11400 struct x86_init_mapping {
11401 void (*pagetable_reserve)(u64 start, u64 end);
11402-};
11403+} __no_const;
11404
11405 /**
11406 * struct x86_init_paging - platform specific paging functions
11407@@ -86,7 +86,7 @@ struct x86_init_mapping {
11408 struct x86_init_paging {
11409 void (*pagetable_setup_start)(pgd_t *base);
11410 void (*pagetable_setup_done)(pgd_t *base);
11411-};
11412+} __no_const;
11413
11414 /**
11415 * struct x86_init_timers - platform specific timer setup
11416@@ -101,7 +101,7 @@ struct x86_init_timers {
11417 void (*tsc_pre_init)(void);
11418 void (*timer_init)(void);
11419 void (*wallclock_init)(void);
11420-};
11421+} __no_const;
11422
11423 /**
11424 * struct x86_init_iommu - platform specific iommu setup
11425@@ -109,7 +109,7 @@ struct x86_init_timers {
11426 */
11427 struct x86_init_iommu {
11428 int (*iommu_init)(void);
11429-};
11430+} __no_const;
11431
11432 /**
11433 * struct x86_init_pci - platform specific pci init functions
11434@@ -123,7 +123,7 @@ struct x86_init_pci {
11435 int (*init)(void);
11436 void (*init_irq)(void);
11437 void (*fixup_irqs)(void);
11438-};
11439+} __no_const;
11440
11441 /**
11442 * struct x86_init_ops - functions for platform specific setup
11443@@ -139,7 +139,7 @@ struct x86_init_ops {
11444 struct x86_init_timers timers;
11445 struct x86_init_iommu iommu;
11446 struct x86_init_pci pci;
11447-};
11448+} __no_const;
11449
11450 /**
11451 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11452@@ -147,7 +147,7 @@ struct x86_init_ops {
11453 */
11454 struct x86_cpuinit_ops {
11455 void (*setup_percpu_clockev)(void);
11456-};
11457+} __no_const;
11458
11459 /**
11460 * struct x86_platform_ops - platform specific runtime functions
11461@@ -169,7 +169,7 @@ struct x86_platform_ops {
11462 void (*nmi_init)(void);
11463 unsigned char (*get_nmi_reason)(void);
11464 int (*i8042_detect)(void);
11465-};
11466+} __no_const;
11467
11468 struct pci_dev;
11469
11470@@ -177,7 +177,7 @@ struct x86_msi_ops {
11471 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11472 void (*teardown_msi_irq)(unsigned int irq);
11473 void (*teardown_msi_irqs)(struct pci_dev *dev);
11474-};
11475+} __no_const;
11476
11477 extern struct x86_init_ops x86_init;
11478 extern struct x86_cpuinit_ops x86_cpuinit;
11479diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11480index c6ce245..ffbdab7 100644
11481--- a/arch/x86/include/asm/xsave.h
11482+++ b/arch/x86/include/asm/xsave.h
11483@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11484 {
11485 int err;
11486
11487+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11488+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11489+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11490+#endif
11491+
11492 /*
11493 * Clear the xsave header first, so that reserved fields are
11494 * initialized to zero.
11495@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11496 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11497 {
11498 int err;
11499- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11500+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11501 u32 lmask = mask;
11502 u32 hmask = mask >> 32;
11503
11504+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11505+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11506+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11507+#endif
11508+
11509 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11510 "2:\n"
11511 ".section .fixup,\"ax\"\n"
11512diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11513index 6a564ac..9b1340c 100644
11514--- a/arch/x86/kernel/acpi/realmode/Makefile
11515+++ b/arch/x86/kernel/acpi/realmode/Makefile
11516@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11517 $(call cc-option, -fno-stack-protector) \
11518 $(call cc-option, -mpreferred-stack-boundary=2)
11519 KBUILD_CFLAGS += $(call cc-option, -m32)
11520+ifdef CONSTIFY_PLUGIN
11521+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11522+endif
11523 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11524 GCOV_PROFILE := n
11525
11526diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11527index b4fd836..4358fe3 100644
11528--- a/arch/x86/kernel/acpi/realmode/wakeup.S
11529+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11530@@ -108,6 +108,9 @@ wakeup_code:
11531 /* Do any other stuff... */
11532
11533 #ifndef CONFIG_64BIT
11534+ /* Recheck NX bit overrides (64bit path does this in trampoline */
11535+ call verify_cpu
11536+
11537 /* This could also be done in C code... */
11538 movl pmode_cr3, %eax
11539 movl %eax, %cr3
11540@@ -131,6 +134,7 @@ wakeup_code:
11541 movl pmode_cr0, %eax
11542 movl %eax, %cr0
11543 jmp pmode_return
11544+# include "../../verify_cpu.S"
11545 #else
11546 pushw $0
11547 pushw trampoline_segment
11548diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11549index 103b6ab..2004d0a 100644
11550--- a/arch/x86/kernel/acpi/sleep.c
11551+++ b/arch/x86/kernel/acpi/sleep.c
11552@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11553 header->trampoline_segment = trampoline_address() >> 4;
11554 #ifdef CONFIG_SMP
11555 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11556+
11557+ pax_open_kernel();
11558 early_gdt_descr.address =
11559 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11560+ pax_close_kernel();
11561+
11562 initial_gs = per_cpu_offset(smp_processor_id());
11563 #endif
11564 initial_code = (unsigned long)wakeup_long64;
11565diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11566index 13ab720..95d5442 100644
11567--- a/arch/x86/kernel/acpi/wakeup_32.S
11568+++ b/arch/x86/kernel/acpi/wakeup_32.S
11569@@ -30,13 +30,11 @@ wakeup_pmode_return:
11570 # and restore the stack ... but you need gdt for this to work
11571 movl saved_context_esp, %esp
11572
11573- movl %cs:saved_magic, %eax
11574- cmpl $0x12345678, %eax
11575+ cmpl $0x12345678, saved_magic
11576 jne bogus_magic
11577
11578 # jump to place where we left off
11579- movl saved_eip, %eax
11580- jmp *%eax
11581+ jmp *(saved_eip)
11582
11583 bogus_magic:
11584 jmp bogus_magic
11585diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11586index 1f84794..e23f862 100644
11587--- a/arch/x86/kernel/alternative.c
11588+++ b/arch/x86/kernel/alternative.c
11589@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11590 */
11591 for (a = start; a < end; a++) {
11592 instr = (u8 *)&a->instr_offset + a->instr_offset;
11593+
11594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11595+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11596+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11597+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11598+#endif
11599+
11600 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11601 BUG_ON(a->replacementlen > a->instrlen);
11602 BUG_ON(a->instrlen > sizeof(insnbuf));
11603@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11604 for (poff = start; poff < end; poff++) {
11605 u8 *ptr = (u8 *)poff + *poff;
11606
11607+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11608+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11609+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11610+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11611+#endif
11612+
11613 if (!*poff || ptr < text || ptr >= text_end)
11614 continue;
11615 /* turn DS segment override prefix into lock prefix */
11616- if (*ptr == 0x3e)
11617+ if (*ktla_ktva(ptr) == 0x3e)
11618 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11619 };
11620 mutex_unlock(&text_mutex);
11621@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11622 for (poff = start; poff < end; poff++) {
11623 u8 *ptr = (u8 *)poff + *poff;
11624
11625+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11626+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11627+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11628+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11629+#endif
11630+
11631 if (!*poff || ptr < text || ptr >= text_end)
11632 continue;
11633 /* turn lock prefix into DS segment override prefix */
11634- if (*ptr == 0xf0)
11635+ if (*ktla_ktva(ptr) == 0xf0)
11636 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11637 };
11638 mutex_unlock(&text_mutex);
11639@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11640
11641 BUG_ON(p->len > MAX_PATCH_LEN);
11642 /* prep the buffer with the original instructions */
11643- memcpy(insnbuf, p->instr, p->len);
11644+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11645 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11646 (unsigned long)p->instr, p->len);
11647
11648@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11649 if (smp_alt_once)
11650 free_init_pages("SMP alternatives",
11651 (unsigned long)__smp_locks,
11652- (unsigned long)__smp_locks_end);
11653+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11654
11655 restart_nmi();
11656 }
11657@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11658 * instructions. And on the local CPU you need to be protected again NMI or MCE
11659 * handlers seeing an inconsistent instruction while you patch.
11660 */
11661-void *__init_or_module text_poke_early(void *addr, const void *opcode,
11662+void *__kprobes text_poke_early(void *addr, const void *opcode,
11663 size_t len)
11664 {
11665 unsigned long flags;
11666 local_irq_save(flags);
11667- memcpy(addr, opcode, len);
11668+
11669+ pax_open_kernel();
11670+ memcpy(ktla_ktva(addr), opcode, len);
11671 sync_core();
11672+ pax_close_kernel();
11673+
11674 local_irq_restore(flags);
11675 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11676 that causes hangs on some VIA CPUs. */
11677@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11678 */
11679 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11680 {
11681- unsigned long flags;
11682- char *vaddr;
11683+ unsigned char *vaddr = ktla_ktva(addr);
11684 struct page *pages[2];
11685- int i;
11686+ size_t i;
11687
11688 if (!core_kernel_text((unsigned long)addr)) {
11689- pages[0] = vmalloc_to_page(addr);
11690- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11691+ pages[0] = vmalloc_to_page(vaddr);
11692+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11693 } else {
11694- pages[0] = virt_to_page(addr);
11695+ pages[0] = virt_to_page(vaddr);
11696 WARN_ON(!PageReserved(pages[0]));
11697- pages[1] = virt_to_page(addr + PAGE_SIZE);
11698+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11699 }
11700 BUG_ON(!pages[0]);
11701- local_irq_save(flags);
11702- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11703- if (pages[1])
11704- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11705- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11706- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11707- clear_fixmap(FIX_TEXT_POKE0);
11708- if (pages[1])
11709- clear_fixmap(FIX_TEXT_POKE1);
11710- local_flush_tlb();
11711- sync_core();
11712- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11713- that causes hangs on some VIA CPUs. */
11714+ text_poke_early(addr, opcode, len);
11715 for (i = 0; i < len; i++)
11716- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11717- local_irq_restore(flags);
11718+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11719 return addr;
11720 }
11721
11722diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11723index f98d84c..e402a69 100644
11724--- a/arch/x86/kernel/apic/apic.c
11725+++ b/arch/x86/kernel/apic/apic.c
11726@@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11727 /*
11728 * Debug level, exported for io_apic.c
11729 */
11730-unsigned int apic_verbosity;
11731+int apic_verbosity;
11732
11733 int pic_mode;
11734
11735@@ -1853,7 +1853,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11736 apic_write(APIC_ESR, 0);
11737 v1 = apic_read(APIC_ESR);
11738 ack_APIC_irq();
11739- atomic_inc(&irq_err_count);
11740+ atomic_inc_unchecked(&irq_err_count);
11741
11742 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11743 smp_processor_id(), v0 , v1);
11744diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11745index 6d939d7..0697fcc 100644
11746--- a/arch/x86/kernel/apic/io_apic.c
11747+++ b/arch/x86/kernel/apic/io_apic.c
11748@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11749 }
11750 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11751
11752-void lock_vector_lock(void)
11753+void lock_vector_lock(void) __acquires(vector_lock)
11754 {
11755 /* Used to the online set of cpus does not change
11756 * during assign_irq_vector.
11757@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
11758 raw_spin_lock(&vector_lock);
11759 }
11760
11761-void unlock_vector_lock(void)
11762+void unlock_vector_lock(void) __releases(vector_lock)
11763 {
11764 raw_spin_unlock(&vector_lock);
11765 }
11766@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
11767 ack_APIC_irq();
11768 }
11769
11770-atomic_t irq_mis_count;
11771+atomic_unchecked_t irq_mis_count;
11772
11773 static void ack_apic_level(struct irq_data *data)
11774 {
11775@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
11776 * at the cpu.
11777 */
11778 if (!(v & (1 << (i & 0x1f)))) {
11779- atomic_inc(&irq_mis_count);
11780+ atomic_inc_unchecked(&irq_mis_count);
11781
11782 eoi_ioapic_irq(irq, cfg);
11783 }
11784diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11785index a46bd38..6b906d7 100644
11786--- a/arch/x86/kernel/apm_32.c
11787+++ b/arch/x86/kernel/apm_32.c
11788@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
11789 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11790 * even though they are called in protected mode.
11791 */
11792-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11793+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11794 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11795
11796 static const char driver_version[] = "1.16ac"; /* no spaces */
11797@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
11798 BUG_ON(cpu != 0);
11799 gdt = get_cpu_gdt_table(cpu);
11800 save_desc_40 = gdt[0x40 / 8];
11801+
11802+ pax_open_kernel();
11803 gdt[0x40 / 8] = bad_bios_desc;
11804+ pax_close_kernel();
11805
11806 apm_irq_save(flags);
11807 APM_DO_SAVE_SEGS;
11808@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
11809 &call->esi);
11810 APM_DO_RESTORE_SEGS;
11811 apm_irq_restore(flags);
11812+
11813+ pax_open_kernel();
11814 gdt[0x40 / 8] = save_desc_40;
11815+ pax_close_kernel();
11816+
11817 put_cpu();
11818
11819 return call->eax & 0xff;
11820@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
11821 BUG_ON(cpu != 0);
11822 gdt = get_cpu_gdt_table(cpu);
11823 save_desc_40 = gdt[0x40 / 8];
11824+
11825+ pax_open_kernel();
11826 gdt[0x40 / 8] = bad_bios_desc;
11827+ pax_close_kernel();
11828
11829 apm_irq_save(flags);
11830 APM_DO_SAVE_SEGS;
11831@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
11832 &call->eax);
11833 APM_DO_RESTORE_SEGS;
11834 apm_irq_restore(flags);
11835+
11836+ pax_open_kernel();
11837 gdt[0x40 / 8] = save_desc_40;
11838+ pax_close_kernel();
11839+
11840 put_cpu();
11841 return error;
11842 }
11843@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
11844 * code to that CPU.
11845 */
11846 gdt = get_cpu_gdt_table(0);
11847+
11848+ pax_open_kernel();
11849 set_desc_base(&gdt[APM_CS >> 3],
11850 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11851 set_desc_base(&gdt[APM_CS_16 >> 3],
11852 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11853 set_desc_base(&gdt[APM_DS >> 3],
11854 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11855+ pax_close_kernel();
11856
11857 proc_create("apm", 0, NULL, &apm_file_ops);
11858
11859diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11860index 4f13faf..87db5d2 100644
11861--- a/arch/x86/kernel/asm-offsets.c
11862+++ b/arch/x86/kernel/asm-offsets.c
11863@@ -33,6 +33,8 @@ void common(void) {
11864 OFFSET(TI_status, thread_info, status);
11865 OFFSET(TI_addr_limit, thread_info, addr_limit);
11866 OFFSET(TI_preempt_count, thread_info, preempt_count);
11867+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11868+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11869
11870 BLANK();
11871 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11872@@ -53,8 +55,26 @@ void common(void) {
11873 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11874 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11875 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11876+
11877+#ifdef CONFIG_PAX_KERNEXEC
11878+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11879 #endif
11880
11881+#ifdef CONFIG_PAX_MEMORY_UDEREF
11882+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11883+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11884+#ifdef CONFIG_X86_64
11885+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11886+#endif
11887+#endif
11888+
11889+#endif
11890+
11891+ BLANK();
11892+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11893+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11894+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11895+
11896 #ifdef CONFIG_XEN
11897 BLANK();
11898 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11899diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11900index e72a119..6e2955d 100644
11901--- a/arch/x86/kernel/asm-offsets_64.c
11902+++ b/arch/x86/kernel/asm-offsets_64.c
11903@@ -69,6 +69,7 @@ int main(void)
11904 BLANK();
11905 #undef ENTRY
11906
11907+ DEFINE(TSS_size, sizeof(struct tss_struct));
11908 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11909 BLANK();
11910
11911diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11912index 25f24dc..4094a7f 100644
11913--- a/arch/x86/kernel/cpu/Makefile
11914+++ b/arch/x86/kernel/cpu/Makefile
11915@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11916 CFLAGS_REMOVE_perf_event.o = -pg
11917 endif
11918
11919-# Make sure load_percpu_segment has no stackprotector
11920-nostackp := $(call cc-option, -fno-stack-protector)
11921-CFLAGS_common.o := $(nostackp)
11922-
11923 obj-y := intel_cacheinfo.o scattered.o topology.o
11924 obj-y += proc.o capflags.o powerflags.o common.o
11925 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11926diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11927index 0bab2b1..d0a1bf8 100644
11928--- a/arch/x86/kernel/cpu/amd.c
11929+++ b/arch/x86/kernel/cpu/amd.c
11930@@ -664,7 +664,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11931 unsigned int size)
11932 {
11933 /* AMD errata T13 (order #21922) */
11934- if ((c->x86 == 6)) {
11935+ if (c->x86 == 6) {
11936 /* Duron Rev A0 */
11937 if (c->x86_model == 3 && c->x86_mask == 0)
11938 size = 64;
11939diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11940index aa003b1..47ea638 100644
11941--- a/arch/x86/kernel/cpu/common.c
11942+++ b/arch/x86/kernel/cpu/common.c
11943@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11944
11945 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11946
11947-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11948-#ifdef CONFIG_X86_64
11949- /*
11950- * We need valid kernel segments for data and code in long mode too
11951- * IRET will check the segment types kkeil 2000/10/28
11952- * Also sysret mandates a special GDT layout
11953- *
11954- * TLS descriptors are currently at a different place compared to i386.
11955- * Hopefully nobody expects them at a fixed place (Wine?)
11956- */
11957- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11958- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11959- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11960- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11961- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11962- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11963-#else
11964- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11965- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11966- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11967- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11968- /*
11969- * Segments used for calling PnP BIOS have byte granularity.
11970- * They code segments and data segments have fixed 64k limits,
11971- * the transfer segment sizes are set at run time.
11972- */
11973- /* 32-bit code */
11974- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11975- /* 16-bit code */
11976- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11977- /* 16-bit data */
11978- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11979- /* 16-bit data */
11980- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11981- /* 16-bit data */
11982- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11983- /*
11984- * The APM segments have byte granularity and their bases
11985- * are set at run time. All have 64k limits.
11986- */
11987- /* 32-bit code */
11988- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11989- /* 16-bit code */
11990- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11991- /* data */
11992- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11993-
11994- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11995- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11996- GDT_STACK_CANARY_INIT
11997-#endif
11998-} };
11999-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12000-
12001 static int __init x86_xsave_setup(char *s)
12002 {
12003 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12004@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
12005 {
12006 struct desc_ptr gdt_descr;
12007
12008- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12009+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12010 gdt_descr.size = GDT_SIZE - 1;
12011 load_gdt(&gdt_descr);
12012 /* Reload the per-cpu base */
12013@@ -844,6 +790,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
12014 /* Filter out anything that depends on CPUID levels we don't have */
12015 filter_cpuid_features(c, true);
12016
12017+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
12018+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12019+#endif
12020+
12021 /* If the model name is still unset, do table lookup. */
12022 if (!c->x86_model_id[0]) {
12023 const char *p;
12024@@ -1024,6 +974,9 @@ static __init int setup_disablecpuid(char *arg)
12025 }
12026 __setup("clearcpuid=", setup_disablecpuid);
12027
12028+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12029+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12030+
12031 #ifdef CONFIG_X86_64
12032 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12033
12034@@ -1039,7 +992,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
12035 EXPORT_PER_CPU_SYMBOL(current_task);
12036
12037 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12038- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12039+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12040 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12041
12042 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12043@@ -1104,7 +1057,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
12044 {
12045 memset(regs, 0, sizeof(struct pt_regs));
12046 regs->fs = __KERNEL_PERCPU;
12047- regs->gs = __KERNEL_STACK_CANARY;
12048+ savesegment(gs, regs->gs);
12049
12050 return regs;
12051 }
12052@@ -1159,7 +1112,7 @@ void __cpuinit cpu_init(void)
12053 int i;
12054
12055 cpu = stack_smp_processor_id();
12056- t = &per_cpu(init_tss, cpu);
12057+ t = init_tss + cpu;
12058 oist = &per_cpu(orig_ist, cpu);
12059
12060 #ifdef CONFIG_NUMA
12061@@ -1185,7 +1138,7 @@ void __cpuinit cpu_init(void)
12062 switch_to_new_gdt(cpu);
12063 loadsegment(fs, 0);
12064
12065- load_idt((const struct desc_ptr *)&idt_descr);
12066+ load_idt(&idt_descr);
12067
12068 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12069 syscall_init();
12070@@ -1194,7 +1147,6 @@ void __cpuinit cpu_init(void)
12071 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12072 barrier();
12073
12074- x86_configure_nx();
12075 if (cpu != 0)
12076 enable_x2apic();
12077
12078@@ -1248,7 +1200,7 @@ void __cpuinit cpu_init(void)
12079 {
12080 int cpu = smp_processor_id();
12081 struct task_struct *curr = current;
12082- struct tss_struct *t = &per_cpu(init_tss, cpu);
12083+ struct tss_struct *t = init_tss + cpu;
12084 struct thread_struct *thread = &curr->thread;
12085
12086 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12087diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12088index 5231312..a78a987 100644
12089--- a/arch/x86/kernel/cpu/intel.c
12090+++ b/arch/x86/kernel/cpu/intel.c
12091@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12092 * Update the IDT descriptor and reload the IDT so that
12093 * it uses the read-only mapped virtual address.
12094 */
12095- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12096+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12097 load_idt(&idt_descr);
12098 }
12099 #endif
12100diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12101index 2af127d..8ff7ac0 100644
12102--- a/arch/x86/kernel/cpu/mcheck/mce.c
12103+++ b/arch/x86/kernel/cpu/mcheck/mce.c
12104@@ -42,6 +42,7 @@
12105 #include <asm/processor.h>
12106 #include <asm/mce.h>
12107 #include <asm/msr.h>
12108+#include <asm/local.h>
12109
12110 #include "mce-internal.h"
12111
12112@@ -202,7 +203,7 @@ static void print_mce(struct mce *m)
12113 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12114 m->cs, m->ip);
12115
12116- if (m->cs == __KERNEL_CS)
12117+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12118 print_symbol("{%s}", m->ip);
12119 pr_cont("\n");
12120 }
12121@@ -235,10 +236,10 @@ static void print_mce(struct mce *m)
12122
12123 #define PANIC_TIMEOUT 5 /* 5 seconds */
12124
12125-static atomic_t mce_paniced;
12126+static atomic_unchecked_t mce_paniced;
12127
12128 static int fake_panic;
12129-static atomic_t mce_fake_paniced;
12130+static atomic_unchecked_t mce_fake_paniced;
12131
12132 /* Panic in progress. Enable interrupts and wait for final IPI */
12133 static void wait_for_panic(void)
12134@@ -262,7 +263,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12135 /*
12136 * Make sure only one CPU runs in machine check panic
12137 */
12138- if (atomic_inc_return(&mce_paniced) > 1)
12139+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12140 wait_for_panic();
12141 barrier();
12142
12143@@ -270,7 +271,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12144 console_verbose();
12145 } else {
12146 /* Don't log too much for fake panic */
12147- if (atomic_inc_return(&mce_fake_paniced) > 1)
12148+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12149 return;
12150 }
12151 /* First print corrected ones that are still unlogged */
12152@@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12153 * might have been modified by someone else.
12154 */
12155 rmb();
12156- if (atomic_read(&mce_paniced))
12157+ if (atomic_read_unchecked(&mce_paniced))
12158 wait_for_panic();
12159 if (!monarch_timeout)
12160 goto out;
12161@@ -1398,7 +1399,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12162 }
12163
12164 /* Call the installed machine check handler for this CPU setup. */
12165-void (*machine_check_vector)(struct pt_regs *, long error_code) =
12166+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12167 unexpected_machine_check;
12168
12169 /*
12170@@ -1421,7 +1422,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12171 return;
12172 }
12173
12174+ pax_open_kernel();
12175 machine_check_vector = do_machine_check;
12176+ pax_close_kernel();
12177
12178 __mcheck_cpu_init_generic();
12179 __mcheck_cpu_init_vendor(c);
12180@@ -1435,7 +1438,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12181 */
12182
12183 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12184-static int mce_chrdev_open_count; /* #times opened */
12185+static local_t mce_chrdev_open_count; /* #times opened */
12186 static int mce_chrdev_open_exclu; /* already open exclusive? */
12187
12188 static int mce_chrdev_open(struct inode *inode, struct file *file)
12189@@ -1443,7 +1446,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12190 spin_lock(&mce_chrdev_state_lock);
12191
12192 if (mce_chrdev_open_exclu ||
12193- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12194+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12195 spin_unlock(&mce_chrdev_state_lock);
12196
12197 return -EBUSY;
12198@@ -1451,7 +1454,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12199
12200 if (file->f_flags & O_EXCL)
12201 mce_chrdev_open_exclu = 1;
12202- mce_chrdev_open_count++;
12203+ local_inc(&mce_chrdev_open_count);
12204
12205 spin_unlock(&mce_chrdev_state_lock);
12206
12207@@ -1462,7 +1465,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12208 {
12209 spin_lock(&mce_chrdev_state_lock);
12210
12211- mce_chrdev_open_count--;
12212+ local_dec(&mce_chrdev_open_count);
12213 mce_chrdev_open_exclu = 0;
12214
12215 spin_unlock(&mce_chrdev_state_lock);
12216@@ -2171,7 +2174,7 @@ struct dentry *mce_get_debugfs_dir(void)
12217 static void mce_reset(void)
12218 {
12219 cpu_missing = 0;
12220- atomic_set(&mce_fake_paniced, 0);
12221+ atomic_set_unchecked(&mce_fake_paniced, 0);
12222 atomic_set(&mce_executing, 0);
12223 atomic_set(&mce_callin, 0);
12224 atomic_set(&global_nwo, 0);
12225diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12226index 5c0e653..51ddf2c 100644
12227--- a/arch/x86/kernel/cpu/mcheck/p5.c
12228+++ b/arch/x86/kernel/cpu/mcheck/p5.c
12229@@ -11,7 +11,7 @@
12230 #include <asm/processor.h>
12231 #include <asm/system.h>
12232 #include <asm/mce.h>
12233-#include <asm/msr.h>
12234+#include <asm/pgtable.h>
12235
12236 /* By default disabled */
12237 int mce_p5_enabled __read_mostly;
12238@@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12239 if (!cpu_has(c, X86_FEATURE_MCE))
12240 return;
12241
12242+ pax_open_kernel();
12243 machine_check_vector = pentium_machine_check;
12244+ pax_close_kernel();
12245 /* Make sure the vector pointer is visible before we enable MCEs: */
12246 wmb();
12247
12248diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12249index 54060f5..c1a7577 100644
12250--- a/arch/x86/kernel/cpu/mcheck/winchip.c
12251+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12252@@ -11,6 +11,7 @@
12253 #include <asm/system.h>
12254 #include <asm/mce.h>
12255 #include <asm/msr.h>
12256+#include <asm/pgtable.h>
12257
12258 /* Machine check handler for WinChip C6: */
12259 static void winchip_machine_check(struct pt_regs *regs, long error_code)
12260@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12261 {
12262 u32 lo, hi;
12263
12264+ pax_open_kernel();
12265 machine_check_vector = winchip_machine_check;
12266+ pax_close_kernel();
12267 /* Make sure the vector pointer is visible before we enable MCEs: */
12268 wmb();
12269
12270diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12271index 6b96110..0da73eb 100644
12272--- a/arch/x86/kernel/cpu/mtrr/main.c
12273+++ b/arch/x86/kernel/cpu/mtrr/main.c
12274@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12275 u64 size_or_mask, size_and_mask;
12276 static bool mtrr_aps_delayed_init;
12277
12278-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12279+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12280
12281 const struct mtrr_ops *mtrr_if;
12282
12283diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12284index df5e41f..816c719 100644
12285--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12286+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12287@@ -25,7 +25,7 @@ struct mtrr_ops {
12288 int (*validate_add_page)(unsigned long base, unsigned long size,
12289 unsigned int type);
12290 int (*have_wrcomb)(void);
12291-};
12292+} __do_const;
12293
12294 extern int generic_get_free_region(unsigned long base, unsigned long size,
12295 int replace_reg);
12296diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12297index 2bda212..78cc605 100644
12298--- a/arch/x86/kernel/cpu/perf_event.c
12299+++ b/arch/x86/kernel/cpu/perf_event.c
12300@@ -1529,7 +1529,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12301 break;
12302
12303 perf_callchain_store(entry, frame.return_address);
12304- fp = frame.next_frame;
12305+ fp = (const void __force_user *)frame.next_frame;
12306 }
12307 }
12308
12309diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12310index 13ad899..f642b9a 100644
12311--- a/arch/x86/kernel/crash.c
12312+++ b/arch/x86/kernel/crash.c
12313@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
12314 {
12315 #ifdef CONFIG_X86_32
12316 struct pt_regs fixed_regs;
12317-#endif
12318
12319-#ifdef CONFIG_X86_32
12320- if (!user_mode_vm(regs)) {
12321+ if (!user_mode(regs)) {
12322 crash_fixup_ss_esp(&fixed_regs, regs);
12323 regs = &fixed_regs;
12324 }
12325diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12326index 37250fe..bf2ec74 100644
12327--- a/arch/x86/kernel/doublefault_32.c
12328+++ b/arch/x86/kernel/doublefault_32.c
12329@@ -11,7 +11,7 @@
12330
12331 #define DOUBLEFAULT_STACKSIZE (1024)
12332 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12333-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12334+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12335
12336 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12337
12338@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12339 unsigned long gdt, tss;
12340
12341 store_gdt(&gdt_desc);
12342- gdt = gdt_desc.address;
12343+ gdt = (unsigned long)gdt_desc.address;
12344
12345 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12346
12347@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12348 /* 0x2 bit is always set */
12349 .flags = X86_EFLAGS_SF | 0x2,
12350 .sp = STACK_START,
12351- .es = __USER_DS,
12352+ .es = __KERNEL_DS,
12353 .cs = __KERNEL_CS,
12354 .ss = __KERNEL_DS,
12355- .ds = __USER_DS,
12356+ .ds = __KERNEL_DS,
12357 .fs = __KERNEL_PERCPU,
12358
12359 .__cr3 = __pa_nodebug(swapper_pg_dir),
12360diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12361index 1aae78f..aab3a3d 100644
12362--- a/arch/x86/kernel/dumpstack.c
12363+++ b/arch/x86/kernel/dumpstack.c
12364@@ -2,6 +2,9 @@
12365 * Copyright (C) 1991, 1992 Linus Torvalds
12366 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12367 */
12368+#ifdef CONFIG_GRKERNSEC_HIDESYM
12369+#define __INCLUDED_BY_HIDESYM 1
12370+#endif
12371 #include <linux/kallsyms.h>
12372 #include <linux/kprobes.h>
12373 #include <linux/uaccess.h>
12374@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12375 static void
12376 print_ftrace_graph_addr(unsigned long addr, void *data,
12377 const struct stacktrace_ops *ops,
12378- struct thread_info *tinfo, int *graph)
12379+ struct task_struct *task, int *graph)
12380 {
12381- struct task_struct *task = tinfo->task;
12382 unsigned long ret_addr;
12383 int index = task->curr_ret_stack;
12384
12385@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12386 static inline void
12387 print_ftrace_graph_addr(unsigned long addr, void *data,
12388 const struct stacktrace_ops *ops,
12389- struct thread_info *tinfo, int *graph)
12390+ struct task_struct *task, int *graph)
12391 { }
12392 #endif
12393
12394@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12395 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12396 */
12397
12398-static inline int valid_stack_ptr(struct thread_info *tinfo,
12399- void *p, unsigned int size, void *end)
12400+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12401 {
12402- void *t = tinfo;
12403 if (end) {
12404 if (p < end && p >= (end-THREAD_SIZE))
12405 return 1;
12406@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12407 }
12408
12409 unsigned long
12410-print_context_stack(struct thread_info *tinfo,
12411+print_context_stack(struct task_struct *task, void *stack_start,
12412 unsigned long *stack, unsigned long bp,
12413 const struct stacktrace_ops *ops, void *data,
12414 unsigned long *end, int *graph)
12415 {
12416 struct stack_frame *frame = (struct stack_frame *)bp;
12417
12418- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12419+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12420 unsigned long addr;
12421
12422 addr = *stack;
12423@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12424 } else {
12425 ops->address(data, addr, 0);
12426 }
12427- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12428+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12429 }
12430 stack++;
12431 }
12432@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12433 EXPORT_SYMBOL_GPL(print_context_stack);
12434
12435 unsigned long
12436-print_context_stack_bp(struct thread_info *tinfo,
12437+print_context_stack_bp(struct task_struct *task, void *stack_start,
12438 unsigned long *stack, unsigned long bp,
12439 const struct stacktrace_ops *ops, void *data,
12440 unsigned long *end, int *graph)
12441@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12442 struct stack_frame *frame = (struct stack_frame *)bp;
12443 unsigned long *ret_addr = &frame->return_address;
12444
12445- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12446+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12447 unsigned long addr = *ret_addr;
12448
12449 if (!__kernel_text_address(addr))
12450@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12451 ops->address(data, addr, 1);
12452 frame = frame->next_frame;
12453 ret_addr = &frame->return_address;
12454- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12455+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12456 }
12457
12458 return (unsigned long)frame;
12459@@ -186,7 +186,7 @@ void dump_stack(void)
12460
12461 bp = stack_frame(current, NULL);
12462 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12463- current->pid, current->comm, print_tainted(),
12464+ task_pid_nr(current), current->comm, print_tainted(),
12465 init_utsname()->release,
12466 (int)strcspn(init_utsname()->version, " "),
12467 init_utsname()->version);
12468@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12469 }
12470 EXPORT_SYMBOL_GPL(oops_begin);
12471
12472+extern void gr_handle_kernel_exploit(void);
12473+
12474 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12475 {
12476 if (regs && kexec_should_crash(current))
12477@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12478 panic("Fatal exception in interrupt");
12479 if (panic_on_oops)
12480 panic("Fatal exception");
12481- do_exit(signr);
12482+
12483+ gr_handle_kernel_exploit();
12484+
12485+ do_group_exit(signr);
12486 }
12487
12488 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12489@@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12490
12491 show_registers(regs);
12492 #ifdef CONFIG_X86_32
12493- if (user_mode_vm(regs)) {
12494+ if (user_mode(regs)) {
12495 sp = regs->sp;
12496 ss = regs->ss & 0xffff;
12497 } else {
12498@@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12499 unsigned long flags = oops_begin();
12500 int sig = SIGSEGV;
12501
12502- if (!user_mode_vm(regs))
12503+ if (!user_mode(regs))
12504 report_bug(regs->ip, regs);
12505
12506 if (__die(str, regs, err))
12507diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12508index c99f9ed..2a15d80 100644
12509--- a/arch/x86/kernel/dumpstack_32.c
12510+++ b/arch/x86/kernel/dumpstack_32.c
12511@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12512 bp = stack_frame(task, regs);
12513
12514 for (;;) {
12515- struct thread_info *context;
12516+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12517
12518- context = (struct thread_info *)
12519- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12520- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12521+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12522
12523- stack = (unsigned long *)context->previous_esp;
12524- if (!stack)
12525+ if (stack_start == task_stack_page(task))
12526 break;
12527+ stack = *(unsigned long **)stack_start;
12528 if (ops->stack(data, "IRQ") < 0)
12529 break;
12530 touch_nmi_watchdog();
12531@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12532 * When in-kernel, we also print out the stack and code at the
12533 * time of the fault..
12534 */
12535- if (!user_mode_vm(regs)) {
12536+ if (!user_mode(regs)) {
12537 unsigned int code_prologue = code_bytes * 43 / 64;
12538 unsigned int code_len = code_bytes;
12539 unsigned char c;
12540 u8 *ip;
12541+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12542
12543 printk(KERN_EMERG "Stack:\n");
12544 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12545
12546 printk(KERN_EMERG "Code: ");
12547
12548- ip = (u8 *)regs->ip - code_prologue;
12549+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12550 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12551 /* try starting at IP */
12552- ip = (u8 *)regs->ip;
12553+ ip = (u8 *)regs->ip + cs_base;
12554 code_len = code_len - code_prologue + 1;
12555 }
12556 for (i = 0; i < code_len; i++, ip++) {
12557@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12558 printk(KERN_CONT " Bad EIP value.");
12559 break;
12560 }
12561- if (ip == (u8 *)regs->ip)
12562+ if (ip == (u8 *)regs->ip + cs_base)
12563 printk(KERN_CONT "<%02x> ", c);
12564 else
12565 printk(KERN_CONT "%02x ", c);
12566@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12567 {
12568 unsigned short ud2;
12569
12570+ ip = ktla_ktva(ip);
12571 if (ip < PAGE_OFFSET)
12572 return 0;
12573 if (probe_kernel_address((unsigned short *)ip, ud2))
12574@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12575
12576 return ud2 == 0x0b0f;
12577 }
12578+
12579+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12580+void pax_check_alloca(unsigned long size)
12581+{
12582+ unsigned long sp = (unsigned long)&sp, stack_left;
12583+
12584+ /* all kernel stacks are of the same size */
12585+ stack_left = sp & (THREAD_SIZE - 1);
12586+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12587+}
12588+EXPORT_SYMBOL(pax_check_alloca);
12589+#endif
12590diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12591index 6d728d9..279514e 100644
12592--- a/arch/x86/kernel/dumpstack_64.c
12593+++ b/arch/x86/kernel/dumpstack_64.c
12594@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12595 unsigned long *irq_stack_end =
12596 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12597 unsigned used = 0;
12598- struct thread_info *tinfo;
12599 int graph = 0;
12600 unsigned long dummy;
12601+ void *stack_start;
12602
12603 if (!task)
12604 task = current;
12605@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12606 * current stack address. If the stacks consist of nested
12607 * exceptions
12608 */
12609- tinfo = task_thread_info(task);
12610 for (;;) {
12611 char *id;
12612 unsigned long *estack_end;
12613+
12614 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12615 &used, &id);
12616
12617@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12618 if (ops->stack(data, id) < 0)
12619 break;
12620
12621- bp = ops->walk_stack(tinfo, stack, bp, ops,
12622+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12623 data, estack_end, &graph);
12624 ops->stack(data, "<EOE>");
12625 /*
12626@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12627 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12628 if (ops->stack(data, "IRQ") < 0)
12629 break;
12630- bp = ops->walk_stack(tinfo, stack, bp,
12631+ bp = ops->walk_stack(task, irq_stack, stack, bp,
12632 ops, data, irq_stack_end, &graph);
12633 /*
12634 * We link to the next stack (which would be
12635@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12636 /*
12637 * This handles the process stack:
12638 */
12639- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12640+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12641+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12642 put_cpu();
12643 }
12644 EXPORT_SYMBOL(dump_trace);
12645@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12646
12647 return ud2 == 0x0b0f;
12648 }
12649+
12650+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12651+void pax_check_alloca(unsigned long size)
12652+{
12653+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12654+ unsigned cpu, used;
12655+ char *id;
12656+
12657+ /* check the process stack first */
12658+ stack_start = (unsigned long)task_stack_page(current);
12659+ stack_end = stack_start + THREAD_SIZE;
12660+ if (likely(stack_start <= sp && sp < stack_end)) {
12661+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
12662+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12663+ return;
12664+ }
12665+
12666+ cpu = get_cpu();
12667+
12668+ /* check the irq stacks */
12669+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12670+ stack_start = stack_end - IRQ_STACK_SIZE;
12671+ if (stack_start <= sp && sp < stack_end) {
12672+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12673+ put_cpu();
12674+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12675+ return;
12676+ }
12677+
12678+ /* check the exception stacks */
12679+ used = 0;
12680+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12681+ stack_start = stack_end - EXCEPTION_STKSZ;
12682+ if (stack_end && stack_start <= sp && sp < stack_end) {
12683+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12684+ put_cpu();
12685+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
12686+ return;
12687+ }
12688+
12689+ put_cpu();
12690+
12691+ /* unknown stack */
12692+ BUG();
12693+}
12694+EXPORT_SYMBOL(pax_check_alloca);
12695+#endif
12696diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12697index cd28a35..c72ed9a 100644
12698--- a/arch/x86/kernel/early_printk.c
12699+++ b/arch/x86/kernel/early_printk.c
12700@@ -7,6 +7,7 @@
12701 #include <linux/pci_regs.h>
12702 #include <linux/pci_ids.h>
12703 #include <linux/errno.h>
12704+#include <linux/sched.h>
12705 #include <asm/io.h>
12706 #include <asm/processor.h>
12707 #include <asm/fcntl.h>
12708diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12709index f3f6f53..0841b66 100644
12710--- a/arch/x86/kernel/entry_32.S
12711+++ b/arch/x86/kernel/entry_32.S
12712@@ -186,13 +186,146 @@
12713 /*CFI_REL_OFFSET gs, PT_GS*/
12714 .endm
12715 .macro SET_KERNEL_GS reg
12716+
12717+#ifdef CONFIG_CC_STACKPROTECTOR
12718 movl $(__KERNEL_STACK_CANARY), \reg
12719+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12720+ movl $(__USER_DS), \reg
12721+#else
12722+ xorl \reg, \reg
12723+#endif
12724+
12725 movl \reg, %gs
12726 .endm
12727
12728 #endif /* CONFIG_X86_32_LAZY_GS */
12729
12730-.macro SAVE_ALL
12731+.macro pax_enter_kernel
12732+#ifdef CONFIG_PAX_KERNEXEC
12733+ call pax_enter_kernel
12734+#endif
12735+.endm
12736+
12737+.macro pax_exit_kernel
12738+#ifdef CONFIG_PAX_KERNEXEC
12739+ call pax_exit_kernel
12740+#endif
12741+.endm
12742+
12743+#ifdef CONFIG_PAX_KERNEXEC
12744+ENTRY(pax_enter_kernel)
12745+#ifdef CONFIG_PARAVIRT
12746+ pushl %eax
12747+ pushl %ecx
12748+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12749+ mov %eax, %esi
12750+#else
12751+ mov %cr0, %esi
12752+#endif
12753+ bts $16, %esi
12754+ jnc 1f
12755+ mov %cs, %esi
12756+ cmp $__KERNEL_CS, %esi
12757+ jz 3f
12758+ ljmp $__KERNEL_CS, $3f
12759+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12760+2:
12761+#ifdef CONFIG_PARAVIRT
12762+ mov %esi, %eax
12763+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12764+#else
12765+ mov %esi, %cr0
12766+#endif
12767+3:
12768+#ifdef CONFIG_PARAVIRT
12769+ popl %ecx
12770+ popl %eax
12771+#endif
12772+ ret
12773+ENDPROC(pax_enter_kernel)
12774+
12775+ENTRY(pax_exit_kernel)
12776+#ifdef CONFIG_PARAVIRT
12777+ pushl %eax
12778+ pushl %ecx
12779+#endif
12780+ mov %cs, %esi
12781+ cmp $__KERNEXEC_KERNEL_CS, %esi
12782+ jnz 2f
12783+#ifdef CONFIG_PARAVIRT
12784+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12785+ mov %eax, %esi
12786+#else
12787+ mov %cr0, %esi
12788+#endif
12789+ btr $16, %esi
12790+ ljmp $__KERNEL_CS, $1f
12791+1:
12792+#ifdef CONFIG_PARAVIRT
12793+ mov %esi, %eax
12794+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12795+#else
12796+ mov %esi, %cr0
12797+#endif
12798+2:
12799+#ifdef CONFIG_PARAVIRT
12800+ popl %ecx
12801+ popl %eax
12802+#endif
12803+ ret
12804+ENDPROC(pax_exit_kernel)
12805+#endif
12806+
12807+.macro pax_erase_kstack
12808+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12809+ call pax_erase_kstack
12810+#endif
12811+.endm
12812+
12813+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12814+/*
12815+ * ebp: thread_info
12816+ * ecx, edx: can be clobbered
12817+ */
12818+ENTRY(pax_erase_kstack)
12819+ pushl %edi
12820+ pushl %eax
12821+
12822+ mov TI_lowest_stack(%ebp), %edi
12823+ mov $-0xBEEF, %eax
12824+ std
12825+
12826+1: mov %edi, %ecx
12827+ and $THREAD_SIZE_asm - 1, %ecx
12828+ shr $2, %ecx
12829+ repne scasl
12830+ jecxz 2f
12831+
12832+ cmp $2*16, %ecx
12833+ jc 2f
12834+
12835+ mov $2*16, %ecx
12836+ repe scasl
12837+ jecxz 2f
12838+ jne 1b
12839+
12840+2: cld
12841+ mov %esp, %ecx
12842+ sub %edi, %ecx
12843+ shr $2, %ecx
12844+ rep stosl
12845+
12846+ mov TI_task_thread_sp0(%ebp), %edi
12847+ sub $128, %edi
12848+ mov %edi, TI_lowest_stack(%ebp)
12849+
12850+ popl %eax
12851+ popl %edi
12852+ ret
12853+ENDPROC(pax_erase_kstack)
12854+#endif
12855+
12856+.macro __SAVE_ALL _DS
12857 cld
12858 PUSH_GS
12859 pushl_cfi %fs
12860@@ -215,7 +348,7 @@
12861 CFI_REL_OFFSET ecx, 0
12862 pushl_cfi %ebx
12863 CFI_REL_OFFSET ebx, 0
12864- movl $(__USER_DS), %edx
12865+ movl $\_DS, %edx
12866 movl %edx, %ds
12867 movl %edx, %es
12868 movl $(__KERNEL_PERCPU), %edx
12869@@ -223,6 +356,15 @@
12870 SET_KERNEL_GS %edx
12871 .endm
12872
12873+.macro SAVE_ALL
12874+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12875+ __SAVE_ALL __KERNEL_DS
12876+ pax_enter_kernel
12877+#else
12878+ __SAVE_ALL __USER_DS
12879+#endif
12880+.endm
12881+
12882 .macro RESTORE_INT_REGS
12883 popl_cfi %ebx
12884 CFI_RESTORE ebx
12885@@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12886 popfl_cfi
12887 jmp syscall_exit
12888 CFI_ENDPROC
12889-END(ret_from_fork)
12890+ENDPROC(ret_from_fork)
12891
12892 /*
12893 * Interrupt exit functions should be protected against kprobes
12894@@ -333,7 +475,15 @@ check_userspace:
12895 movb PT_CS(%esp), %al
12896 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12897 cmpl $USER_RPL, %eax
12898+
12899+#ifdef CONFIG_PAX_KERNEXEC
12900+ jae resume_userspace
12901+
12902+ PAX_EXIT_KERNEL
12903+ jmp resume_kernel
12904+#else
12905 jb resume_kernel # not returning to v8086 or userspace
12906+#endif
12907
12908 ENTRY(resume_userspace)
12909 LOCKDEP_SYS_EXIT
12910@@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12911 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12912 # int/exception return?
12913 jne work_pending
12914- jmp restore_all
12915-END(ret_from_exception)
12916+ jmp restore_all_pax
12917+ENDPROC(ret_from_exception)
12918
12919 #ifdef CONFIG_PREEMPT
12920 ENTRY(resume_kernel)
12921@@ -361,7 +511,7 @@ need_resched:
12922 jz restore_all
12923 call preempt_schedule_irq
12924 jmp need_resched
12925-END(resume_kernel)
12926+ENDPROC(resume_kernel)
12927 #endif
12928 CFI_ENDPROC
12929 /*
12930@@ -395,23 +545,34 @@ sysenter_past_esp:
12931 /*CFI_REL_OFFSET cs, 0*/
12932 /*
12933 * Push current_thread_info()->sysenter_return to the stack.
12934- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12935- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12936 */
12937- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12938+ pushl_cfi $0
12939 CFI_REL_OFFSET eip, 0
12940
12941 pushl_cfi %eax
12942 SAVE_ALL
12943+ GET_THREAD_INFO(%ebp)
12944+ movl TI_sysenter_return(%ebp),%ebp
12945+ movl %ebp,PT_EIP(%esp)
12946 ENABLE_INTERRUPTS(CLBR_NONE)
12947
12948 /*
12949 * Load the potential sixth argument from user stack.
12950 * Careful about security.
12951 */
12952+ movl PT_OLDESP(%esp),%ebp
12953+
12954+#ifdef CONFIG_PAX_MEMORY_UDEREF
12955+ mov PT_OLDSS(%esp),%ds
12956+1: movl %ds:(%ebp),%ebp
12957+ push %ss
12958+ pop %ds
12959+#else
12960 cmpl $__PAGE_OFFSET-3,%ebp
12961 jae syscall_fault
12962 1: movl (%ebp),%ebp
12963+#endif
12964+
12965 movl %ebp,PT_EBP(%esp)
12966 .section __ex_table,"a"
12967 .align 4
12968@@ -434,12 +595,24 @@ sysenter_do_call:
12969 testl $_TIF_ALLWORK_MASK, %ecx
12970 jne sysexit_audit
12971 sysenter_exit:
12972+
12973+#ifdef CONFIG_PAX_RANDKSTACK
12974+ pushl_cfi %eax
12975+ movl %esp, %eax
12976+ call pax_randomize_kstack
12977+ popl_cfi %eax
12978+#endif
12979+
12980+ pax_erase_kstack
12981+
12982 /* if something modifies registers it must also disable sysexit */
12983 movl PT_EIP(%esp), %edx
12984 movl PT_OLDESP(%esp), %ecx
12985 xorl %ebp,%ebp
12986 TRACE_IRQS_ON
12987 1: mov PT_FS(%esp), %fs
12988+2: mov PT_DS(%esp), %ds
12989+3: mov PT_ES(%esp), %es
12990 PTGS_TO_GS
12991 ENABLE_INTERRUPTS_SYSEXIT
12992
12993@@ -456,6 +629,9 @@ sysenter_audit:
12994 movl %eax,%edx /* 2nd arg: syscall number */
12995 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12996 call audit_syscall_entry
12997+
12998+ pax_erase_kstack
12999+
13000 pushl_cfi %ebx
13001 movl PT_EAX(%esp),%eax /* reload syscall number */
13002 jmp sysenter_do_call
13003@@ -482,11 +658,17 @@ sysexit_audit:
13004
13005 CFI_ENDPROC
13006 .pushsection .fixup,"ax"
13007-2: movl $0,PT_FS(%esp)
13008+4: movl $0,PT_FS(%esp)
13009+ jmp 1b
13010+5: movl $0,PT_DS(%esp)
13011+ jmp 1b
13012+6: movl $0,PT_ES(%esp)
13013 jmp 1b
13014 .section __ex_table,"a"
13015 .align 4
13016- .long 1b,2b
13017+ .long 1b,4b
13018+ .long 2b,5b
13019+ .long 3b,6b
13020 .popsection
13021 PTGS_TO_GS_EX
13022 ENDPROC(ia32_sysenter_target)
13023@@ -519,6 +701,15 @@ syscall_exit:
13024 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13025 jne syscall_exit_work
13026
13027+restore_all_pax:
13028+
13029+#ifdef CONFIG_PAX_RANDKSTACK
13030+ movl %esp, %eax
13031+ call pax_randomize_kstack
13032+#endif
13033+
13034+ pax_erase_kstack
13035+
13036 restore_all:
13037 TRACE_IRQS_IRET
13038 restore_all_notrace:
13039@@ -578,14 +769,34 @@ ldt_ss:
13040 * compensating for the offset by changing to the ESPFIX segment with
13041 * a base address that matches for the difference.
13042 */
13043-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
13044+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
13045 mov %esp, %edx /* load kernel esp */
13046 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13047 mov %dx, %ax /* eax: new kernel esp */
13048 sub %eax, %edx /* offset (low word is 0) */
13049+#ifdef CONFIG_SMP
13050+ movl PER_CPU_VAR(cpu_number), %ebx
13051+ shll $PAGE_SHIFT_asm, %ebx
13052+ addl $cpu_gdt_table, %ebx
13053+#else
13054+ movl $cpu_gdt_table, %ebx
13055+#endif
13056 shr $16, %edx
13057- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
13058- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
13059+
13060+#ifdef CONFIG_PAX_KERNEXEC
13061+ mov %cr0, %esi
13062+ btr $16, %esi
13063+ mov %esi, %cr0
13064+#endif
13065+
13066+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13067+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13068+
13069+#ifdef CONFIG_PAX_KERNEXEC
13070+ bts $16, %esi
13071+ mov %esi, %cr0
13072+#endif
13073+
13074 pushl_cfi $__ESPFIX_SS
13075 pushl_cfi %eax /* new kernel esp */
13076 /* Disable interrupts, but do not irqtrace this section: we
13077@@ -614,34 +825,28 @@ work_resched:
13078 movl TI_flags(%ebp), %ecx
13079 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13080 # than syscall tracing?
13081- jz restore_all
13082+ jz restore_all_pax
13083 testb $_TIF_NEED_RESCHED, %cl
13084 jnz work_resched
13085
13086 work_notifysig: # deal with pending signals and
13087 # notify-resume requests
13088+ movl %esp, %eax
13089 #ifdef CONFIG_VM86
13090 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13091- movl %esp, %eax
13092- jne work_notifysig_v86 # returning to kernel-space or
13093+ jz 1f # returning to kernel-space or
13094 # vm86-space
13095- xorl %edx, %edx
13096- call do_notify_resume
13097- jmp resume_userspace_sig
13098
13099- ALIGN
13100-work_notifysig_v86:
13101 pushl_cfi %ecx # save ti_flags for do_notify_resume
13102 call save_v86_state # %eax contains pt_regs pointer
13103 popl_cfi %ecx
13104 movl %eax, %esp
13105-#else
13106- movl %esp, %eax
13107+1:
13108 #endif
13109 xorl %edx, %edx
13110 call do_notify_resume
13111 jmp resume_userspace_sig
13112-END(work_pending)
13113+ENDPROC(work_pending)
13114
13115 # perform syscall exit tracing
13116 ALIGN
13117@@ -649,11 +854,14 @@ syscall_trace_entry:
13118 movl $-ENOSYS,PT_EAX(%esp)
13119 movl %esp, %eax
13120 call syscall_trace_enter
13121+
13122+ pax_erase_kstack
13123+
13124 /* What it returned is what we'll actually use. */
13125 cmpl $(nr_syscalls), %eax
13126 jnae syscall_call
13127 jmp syscall_exit
13128-END(syscall_trace_entry)
13129+ENDPROC(syscall_trace_entry)
13130
13131 # perform syscall exit tracing
13132 ALIGN
13133@@ -666,20 +874,24 @@ syscall_exit_work:
13134 movl %esp, %eax
13135 call syscall_trace_leave
13136 jmp resume_userspace
13137-END(syscall_exit_work)
13138+ENDPROC(syscall_exit_work)
13139 CFI_ENDPROC
13140
13141 RING0_INT_FRAME # can't unwind into user space anyway
13142 syscall_fault:
13143+#ifdef CONFIG_PAX_MEMORY_UDEREF
13144+ push %ss
13145+ pop %ds
13146+#endif
13147 GET_THREAD_INFO(%ebp)
13148 movl $-EFAULT,PT_EAX(%esp)
13149 jmp resume_userspace
13150-END(syscall_fault)
13151+ENDPROC(syscall_fault)
13152
13153 syscall_badsys:
13154 movl $-ENOSYS,PT_EAX(%esp)
13155 jmp resume_userspace
13156-END(syscall_badsys)
13157+ENDPROC(syscall_badsys)
13158 CFI_ENDPROC
13159 /*
13160 * End of kprobes section
13161@@ -753,6 +965,36 @@ ptregs_clone:
13162 CFI_ENDPROC
13163 ENDPROC(ptregs_clone)
13164
13165+ ALIGN;
13166+ENTRY(kernel_execve)
13167+ CFI_STARTPROC
13168+ pushl_cfi %ebp
13169+ sub $PT_OLDSS+4,%esp
13170+ pushl_cfi %edi
13171+ pushl_cfi %ecx
13172+ pushl_cfi %eax
13173+ lea 3*4(%esp),%edi
13174+ mov $PT_OLDSS/4+1,%ecx
13175+ xorl %eax,%eax
13176+ rep stosl
13177+ popl_cfi %eax
13178+ popl_cfi %ecx
13179+ popl_cfi %edi
13180+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13181+ pushl_cfi %esp
13182+ call sys_execve
13183+ add $4,%esp
13184+ CFI_ADJUST_CFA_OFFSET -4
13185+ GET_THREAD_INFO(%ebp)
13186+ test %eax,%eax
13187+ jz syscall_exit
13188+ add $PT_OLDSS+4,%esp
13189+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13190+ popl_cfi %ebp
13191+ ret
13192+ CFI_ENDPROC
13193+ENDPROC(kernel_execve)
13194+
13195 .macro FIXUP_ESPFIX_STACK
13196 /*
13197 * Switch back for ESPFIX stack to the normal zerobased stack
13198@@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13199 * normal stack and adjusts ESP with the matching offset.
13200 */
13201 /* fixup the stack */
13202- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13203- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13204+#ifdef CONFIG_SMP
13205+ movl PER_CPU_VAR(cpu_number), %ebx
13206+ shll $PAGE_SHIFT_asm, %ebx
13207+ addl $cpu_gdt_table, %ebx
13208+#else
13209+ movl $cpu_gdt_table, %ebx
13210+#endif
13211+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13212+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13213 shl $16, %eax
13214 addl %esp, %eax /* the adjusted stack pointer */
13215 pushl_cfi $__KERNEL_DS
13216@@ -816,7 +1065,7 @@ vector=vector+1
13217 .endr
13218 2: jmp common_interrupt
13219 .endr
13220-END(irq_entries_start)
13221+ENDPROC(irq_entries_start)
13222
13223 .previous
13224 END(interrupt)
13225@@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13226 pushl_cfi $do_coprocessor_error
13227 jmp error_code
13228 CFI_ENDPROC
13229-END(coprocessor_error)
13230+ENDPROC(coprocessor_error)
13231
13232 ENTRY(simd_coprocessor_error)
13233 RING0_INT_FRAME
13234@@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13235 #endif
13236 jmp error_code
13237 CFI_ENDPROC
13238-END(simd_coprocessor_error)
13239+ENDPROC(simd_coprocessor_error)
13240
13241 ENTRY(device_not_available)
13242 RING0_INT_FRAME
13243@@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13244 pushl_cfi $do_device_not_available
13245 jmp error_code
13246 CFI_ENDPROC
13247-END(device_not_available)
13248+ENDPROC(device_not_available)
13249
13250 #ifdef CONFIG_PARAVIRT
13251 ENTRY(native_iret)
13252@@ -902,12 +1151,12 @@ ENTRY(native_iret)
13253 .align 4
13254 .long native_iret, iret_exc
13255 .previous
13256-END(native_iret)
13257+ENDPROC(native_iret)
13258
13259 ENTRY(native_irq_enable_sysexit)
13260 sti
13261 sysexit
13262-END(native_irq_enable_sysexit)
13263+ENDPROC(native_irq_enable_sysexit)
13264 #endif
13265
13266 ENTRY(overflow)
13267@@ -916,7 +1165,7 @@ ENTRY(overflow)
13268 pushl_cfi $do_overflow
13269 jmp error_code
13270 CFI_ENDPROC
13271-END(overflow)
13272+ENDPROC(overflow)
13273
13274 ENTRY(bounds)
13275 RING0_INT_FRAME
13276@@ -924,7 +1173,7 @@ ENTRY(bounds)
13277 pushl_cfi $do_bounds
13278 jmp error_code
13279 CFI_ENDPROC
13280-END(bounds)
13281+ENDPROC(bounds)
13282
13283 ENTRY(invalid_op)
13284 RING0_INT_FRAME
13285@@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13286 pushl_cfi $do_invalid_op
13287 jmp error_code
13288 CFI_ENDPROC
13289-END(invalid_op)
13290+ENDPROC(invalid_op)
13291
13292 ENTRY(coprocessor_segment_overrun)
13293 RING0_INT_FRAME
13294@@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13295 pushl_cfi $do_coprocessor_segment_overrun
13296 jmp error_code
13297 CFI_ENDPROC
13298-END(coprocessor_segment_overrun)
13299+ENDPROC(coprocessor_segment_overrun)
13300
13301 ENTRY(invalid_TSS)
13302 RING0_EC_FRAME
13303 pushl_cfi $do_invalid_TSS
13304 jmp error_code
13305 CFI_ENDPROC
13306-END(invalid_TSS)
13307+ENDPROC(invalid_TSS)
13308
13309 ENTRY(segment_not_present)
13310 RING0_EC_FRAME
13311 pushl_cfi $do_segment_not_present
13312 jmp error_code
13313 CFI_ENDPROC
13314-END(segment_not_present)
13315+ENDPROC(segment_not_present)
13316
13317 ENTRY(stack_segment)
13318 RING0_EC_FRAME
13319 pushl_cfi $do_stack_segment
13320 jmp error_code
13321 CFI_ENDPROC
13322-END(stack_segment)
13323+ENDPROC(stack_segment)
13324
13325 ENTRY(alignment_check)
13326 RING0_EC_FRAME
13327 pushl_cfi $do_alignment_check
13328 jmp error_code
13329 CFI_ENDPROC
13330-END(alignment_check)
13331+ENDPROC(alignment_check)
13332
13333 ENTRY(divide_error)
13334 RING0_INT_FRAME
13335@@ -976,7 +1225,7 @@ ENTRY(divide_error)
13336 pushl_cfi $do_divide_error
13337 jmp error_code
13338 CFI_ENDPROC
13339-END(divide_error)
13340+ENDPROC(divide_error)
13341
13342 #ifdef CONFIG_X86_MCE
13343 ENTRY(machine_check)
13344@@ -985,7 +1234,7 @@ ENTRY(machine_check)
13345 pushl_cfi machine_check_vector
13346 jmp error_code
13347 CFI_ENDPROC
13348-END(machine_check)
13349+ENDPROC(machine_check)
13350 #endif
13351
13352 ENTRY(spurious_interrupt_bug)
13353@@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13354 pushl_cfi $do_spurious_interrupt_bug
13355 jmp error_code
13356 CFI_ENDPROC
13357-END(spurious_interrupt_bug)
13358+ENDPROC(spurious_interrupt_bug)
13359 /*
13360 * End of kprobes section
13361 */
13362@@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13363
13364 ENTRY(mcount)
13365 ret
13366-END(mcount)
13367+ENDPROC(mcount)
13368
13369 ENTRY(ftrace_caller)
13370 cmpl $0, function_trace_stop
13371@@ -1138,7 +1387,7 @@ ftrace_graph_call:
13372 .globl ftrace_stub
13373 ftrace_stub:
13374 ret
13375-END(ftrace_caller)
13376+ENDPROC(ftrace_caller)
13377
13378 #else /* ! CONFIG_DYNAMIC_FTRACE */
13379
13380@@ -1174,7 +1423,7 @@ trace:
13381 popl %ecx
13382 popl %eax
13383 jmp ftrace_stub
13384-END(mcount)
13385+ENDPROC(mcount)
13386 #endif /* CONFIG_DYNAMIC_FTRACE */
13387 #endif /* CONFIG_FUNCTION_TRACER */
13388
13389@@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13390 popl %ecx
13391 popl %eax
13392 ret
13393-END(ftrace_graph_caller)
13394+ENDPROC(ftrace_graph_caller)
13395
13396 .globl return_to_handler
13397 return_to_handler:
13398@@ -1209,7 +1458,6 @@ return_to_handler:
13399 jmp *%ecx
13400 #endif
13401
13402-.section .rodata,"a"
13403 #include "syscall_table_32.S"
13404
13405 syscall_table_size=(.-sys_call_table)
13406@@ -1255,15 +1503,18 @@ error_code:
13407 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13408 REG_TO_PTGS %ecx
13409 SET_KERNEL_GS %ecx
13410- movl $(__USER_DS), %ecx
13411+ movl $(__KERNEL_DS), %ecx
13412 movl %ecx, %ds
13413 movl %ecx, %es
13414+
13415+ pax_enter_kernel
13416+
13417 TRACE_IRQS_OFF
13418 movl %esp,%eax # pt_regs pointer
13419 call *%edi
13420 jmp ret_from_exception
13421 CFI_ENDPROC
13422-END(page_fault)
13423+ENDPROC(page_fault)
13424
13425 /*
13426 * Debug traps and NMI can happen at the one SYSENTER instruction
13427@@ -1305,7 +1556,7 @@ debug_stack_correct:
13428 call do_debug
13429 jmp ret_from_exception
13430 CFI_ENDPROC
13431-END(debug)
13432+ENDPROC(debug)
13433
13434 /*
13435 * NMI is doubly nasty. It can happen _while_ we're handling
13436@@ -1342,6 +1593,9 @@ nmi_stack_correct:
13437 xorl %edx,%edx # zero error code
13438 movl %esp,%eax # pt_regs pointer
13439 call do_nmi
13440+
13441+ pax_exit_kernel
13442+
13443 jmp restore_all_notrace
13444 CFI_ENDPROC
13445
13446@@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13447 FIXUP_ESPFIX_STACK # %eax == %esp
13448 xorl %edx,%edx # zero error code
13449 call do_nmi
13450+
13451+ pax_exit_kernel
13452+
13453 RESTORE_REGS
13454 lss 12+4(%esp), %esp # back to espfix stack
13455 CFI_ADJUST_CFA_OFFSET -24
13456 jmp irq_return
13457 CFI_ENDPROC
13458-END(nmi)
13459+ENDPROC(nmi)
13460
13461 ENTRY(int3)
13462 RING0_INT_FRAME
13463@@ -1395,14 +1652,14 @@ ENTRY(int3)
13464 call do_int3
13465 jmp ret_from_exception
13466 CFI_ENDPROC
13467-END(int3)
13468+ENDPROC(int3)
13469
13470 ENTRY(general_protection)
13471 RING0_EC_FRAME
13472 pushl_cfi $do_general_protection
13473 jmp error_code
13474 CFI_ENDPROC
13475-END(general_protection)
13476+ENDPROC(general_protection)
13477
13478 #ifdef CONFIG_KVM_GUEST
13479 ENTRY(async_page_fault)
13480@@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13481 pushl_cfi $do_async_page_fault
13482 jmp error_code
13483 CFI_ENDPROC
13484-END(async_page_fault)
13485+ENDPROC(async_page_fault)
13486 #endif
13487
13488 /*
13489diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13490index faf8d5e..f58c441 100644
13491--- a/arch/x86/kernel/entry_64.S
13492+++ b/arch/x86/kernel/entry_64.S
13493@@ -55,6 +55,8 @@
13494 #include <asm/paravirt.h>
13495 #include <asm/ftrace.h>
13496 #include <asm/percpu.h>
13497+#include <asm/pgtable.h>
13498+#include <asm/alternative-asm.h>
13499
13500 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13501 #include <linux/elf-em.h>
13502@@ -68,8 +70,9 @@
13503 #ifdef CONFIG_FUNCTION_TRACER
13504 #ifdef CONFIG_DYNAMIC_FTRACE
13505 ENTRY(mcount)
13506+ pax_force_retaddr
13507 retq
13508-END(mcount)
13509+ENDPROC(mcount)
13510
13511 ENTRY(ftrace_caller)
13512 cmpl $0, function_trace_stop
13513@@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13514 #endif
13515
13516 GLOBAL(ftrace_stub)
13517+ pax_force_retaddr
13518 retq
13519-END(ftrace_caller)
13520+ENDPROC(ftrace_caller)
13521
13522 #else /* ! CONFIG_DYNAMIC_FTRACE */
13523 ENTRY(mcount)
13524@@ -112,6 +116,7 @@ ENTRY(mcount)
13525 #endif
13526
13527 GLOBAL(ftrace_stub)
13528+ pax_force_retaddr
13529 retq
13530
13531 trace:
13532@@ -121,12 +126,13 @@ trace:
13533 movq 8(%rbp), %rsi
13534 subq $MCOUNT_INSN_SIZE, %rdi
13535
13536+ pax_force_fptr ftrace_trace_function
13537 call *ftrace_trace_function
13538
13539 MCOUNT_RESTORE_FRAME
13540
13541 jmp ftrace_stub
13542-END(mcount)
13543+ENDPROC(mcount)
13544 #endif /* CONFIG_DYNAMIC_FTRACE */
13545 #endif /* CONFIG_FUNCTION_TRACER */
13546
13547@@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13548
13549 MCOUNT_RESTORE_FRAME
13550
13551+ pax_force_retaddr
13552 retq
13553-END(ftrace_graph_caller)
13554+ENDPROC(ftrace_graph_caller)
13555
13556 GLOBAL(return_to_handler)
13557 subq $24, %rsp
13558@@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13559 movq 8(%rsp), %rdx
13560 movq (%rsp), %rax
13561 addq $24, %rsp
13562+ pax_force_fptr %rdi
13563 jmp *%rdi
13564 #endif
13565
13566@@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13567 ENDPROC(native_usergs_sysret64)
13568 #endif /* CONFIG_PARAVIRT */
13569
13570+ .macro ljmpq sel, off
13571+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13572+ .byte 0x48; ljmp *1234f(%rip)
13573+ .pushsection .rodata
13574+ .align 16
13575+ 1234: .quad \off; .word \sel
13576+ .popsection
13577+#else
13578+ pushq $\sel
13579+ pushq $\off
13580+ lretq
13581+#endif
13582+ .endm
13583+
13584+ .macro pax_enter_kernel
13585+ pax_set_fptr_mask
13586+#ifdef CONFIG_PAX_KERNEXEC
13587+ call pax_enter_kernel
13588+#endif
13589+ .endm
13590+
13591+ .macro pax_exit_kernel
13592+#ifdef CONFIG_PAX_KERNEXEC
13593+ call pax_exit_kernel
13594+#endif
13595+ .endm
13596+
13597+#ifdef CONFIG_PAX_KERNEXEC
13598+ENTRY(pax_enter_kernel)
13599+ pushq %rdi
13600+
13601+#ifdef CONFIG_PARAVIRT
13602+ PV_SAVE_REGS(CLBR_RDI)
13603+#endif
13604+
13605+ GET_CR0_INTO_RDI
13606+ bts $16,%rdi
13607+ jnc 3f
13608+ mov %cs,%edi
13609+ cmp $__KERNEL_CS,%edi
13610+ jnz 2f
13611+1:
13612+
13613+#ifdef CONFIG_PARAVIRT
13614+ PV_RESTORE_REGS(CLBR_RDI)
13615+#endif
13616+
13617+ popq %rdi
13618+ pax_force_retaddr
13619+ retq
13620+
13621+2: ljmpq __KERNEL_CS,1f
13622+3: ljmpq __KERNEXEC_KERNEL_CS,4f
13623+4: SET_RDI_INTO_CR0
13624+ jmp 1b
13625+ENDPROC(pax_enter_kernel)
13626+
13627+ENTRY(pax_exit_kernel)
13628+ pushq %rdi
13629+
13630+#ifdef CONFIG_PARAVIRT
13631+ PV_SAVE_REGS(CLBR_RDI)
13632+#endif
13633+
13634+ mov %cs,%rdi
13635+ cmp $__KERNEXEC_KERNEL_CS,%edi
13636+ jz 2f
13637+1:
13638+
13639+#ifdef CONFIG_PARAVIRT
13640+ PV_RESTORE_REGS(CLBR_RDI);
13641+#endif
13642+
13643+ popq %rdi
13644+ pax_force_retaddr
13645+ retq
13646+
13647+2: GET_CR0_INTO_RDI
13648+ btr $16,%rdi
13649+ ljmpq __KERNEL_CS,3f
13650+3: SET_RDI_INTO_CR0
13651+ jmp 1b
13652+#ifdef CONFIG_PARAVIRT
13653+ PV_RESTORE_REGS(CLBR_RDI);
13654+#endif
13655+
13656+ popq %rdi
13657+ pax_force_retaddr
13658+ retq
13659+ENDPROC(pax_exit_kernel)
13660+#endif
13661+
13662+ .macro pax_enter_kernel_user
13663+ pax_set_fptr_mask
13664+#ifdef CONFIG_PAX_MEMORY_UDEREF
13665+ call pax_enter_kernel_user
13666+#endif
13667+ .endm
13668+
13669+ .macro pax_exit_kernel_user
13670+#ifdef CONFIG_PAX_MEMORY_UDEREF
13671+ call pax_exit_kernel_user
13672+#endif
13673+#ifdef CONFIG_PAX_RANDKSTACK
13674+ pushq %rax
13675+ call pax_randomize_kstack
13676+ popq %rax
13677+#endif
13678+ .endm
13679+
13680+#ifdef CONFIG_PAX_MEMORY_UDEREF
13681+ENTRY(pax_enter_kernel_user)
13682+ pushq %rdi
13683+ pushq %rbx
13684+
13685+#ifdef CONFIG_PARAVIRT
13686+ PV_SAVE_REGS(CLBR_RDI)
13687+#endif
13688+
13689+ GET_CR3_INTO_RDI
13690+ mov %rdi,%rbx
13691+ add $__START_KERNEL_map,%rbx
13692+ sub phys_base(%rip),%rbx
13693+
13694+#ifdef CONFIG_PARAVIRT
13695+ pushq %rdi
13696+ cmpl $0, pv_info+PARAVIRT_enabled
13697+ jz 1f
13698+ i = 0
13699+ .rept USER_PGD_PTRS
13700+ mov i*8(%rbx),%rsi
13701+ mov $0,%sil
13702+ lea i*8(%rbx),%rdi
13703+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13704+ i = i + 1
13705+ .endr
13706+ jmp 2f
13707+1:
13708+#endif
13709+
13710+ i = 0
13711+ .rept USER_PGD_PTRS
13712+ movb $0,i*8(%rbx)
13713+ i = i + 1
13714+ .endr
13715+
13716+#ifdef CONFIG_PARAVIRT
13717+2: popq %rdi
13718+#endif
13719+ SET_RDI_INTO_CR3
13720+
13721+#ifdef CONFIG_PAX_KERNEXEC
13722+ GET_CR0_INTO_RDI
13723+ bts $16,%rdi
13724+ SET_RDI_INTO_CR0
13725+#endif
13726+
13727+#ifdef CONFIG_PARAVIRT
13728+ PV_RESTORE_REGS(CLBR_RDI)
13729+#endif
13730+
13731+ popq %rbx
13732+ popq %rdi
13733+ pax_force_retaddr
13734+ retq
13735+ENDPROC(pax_enter_kernel_user)
13736+
13737+ENTRY(pax_exit_kernel_user)
13738+ push %rdi
13739+
13740+#ifdef CONFIG_PARAVIRT
13741+ pushq %rbx
13742+ PV_SAVE_REGS(CLBR_RDI)
13743+#endif
13744+
13745+#ifdef CONFIG_PAX_KERNEXEC
13746+ GET_CR0_INTO_RDI
13747+ btr $16,%rdi
13748+ SET_RDI_INTO_CR0
13749+#endif
13750+
13751+ GET_CR3_INTO_RDI
13752+ add $__START_KERNEL_map,%rdi
13753+ sub phys_base(%rip),%rdi
13754+
13755+#ifdef CONFIG_PARAVIRT
13756+ cmpl $0, pv_info+PARAVIRT_enabled
13757+ jz 1f
13758+ mov %rdi,%rbx
13759+ i = 0
13760+ .rept USER_PGD_PTRS
13761+ mov i*8(%rbx),%rsi
13762+ mov $0x67,%sil
13763+ lea i*8(%rbx),%rdi
13764+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13765+ i = i + 1
13766+ .endr
13767+ jmp 2f
13768+1:
13769+#endif
13770+
13771+ i = 0
13772+ .rept USER_PGD_PTRS
13773+ movb $0x67,i*8(%rdi)
13774+ i = i + 1
13775+ .endr
13776+
13777+#ifdef CONFIG_PARAVIRT
13778+2: PV_RESTORE_REGS(CLBR_RDI)
13779+ popq %rbx
13780+#endif
13781+
13782+ popq %rdi
13783+ pax_force_retaddr
13784+ retq
13785+ENDPROC(pax_exit_kernel_user)
13786+#endif
13787+
13788+.macro pax_erase_kstack
13789+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13790+ call pax_erase_kstack
13791+#endif
13792+.endm
13793+
13794+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13795+/*
13796+ * r11: thread_info
13797+ * rcx, rdx: can be clobbered
13798+ */
13799+ENTRY(pax_erase_kstack)
13800+ pushq %rdi
13801+ pushq %rax
13802+ pushq %r11
13803+
13804+ GET_THREAD_INFO(%r11)
13805+ mov TI_lowest_stack(%r11), %rdi
13806+ mov $-0xBEEF, %rax
13807+ std
13808+
13809+1: mov %edi, %ecx
13810+ and $THREAD_SIZE_asm - 1, %ecx
13811+ shr $3, %ecx
13812+ repne scasq
13813+ jecxz 2f
13814+
13815+ cmp $2*8, %ecx
13816+ jc 2f
13817+
13818+ mov $2*8, %ecx
13819+ repe scasq
13820+ jecxz 2f
13821+ jne 1b
13822+
13823+2: cld
13824+ mov %esp, %ecx
13825+ sub %edi, %ecx
13826+
13827+ cmp $THREAD_SIZE_asm, %rcx
13828+ jb 3f
13829+ ud2
13830+3:
13831+
13832+ shr $3, %ecx
13833+ rep stosq
13834+
13835+ mov TI_task_thread_sp0(%r11), %rdi
13836+ sub $256, %rdi
13837+ mov %rdi, TI_lowest_stack(%r11)
13838+
13839+ popq %r11
13840+ popq %rax
13841+ popq %rdi
13842+ pax_force_retaddr
13843+ ret
13844+ENDPROC(pax_erase_kstack)
13845+#endif
13846
13847 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13848 #ifdef CONFIG_TRACE_IRQFLAGS
13849@@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13850 .endm
13851
13852 .macro UNFAKE_STACK_FRAME
13853- addq $8*6, %rsp
13854- CFI_ADJUST_CFA_OFFSET -(6*8)
13855+ addq $8*6 + ARG_SKIP, %rsp
13856+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13857 .endm
13858
13859 /*
13860@@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13861 movq %rsp, %rsi
13862
13863 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13864- testl $3, CS(%rdi)
13865+ testb $3, CS(%rdi)
13866 je 1f
13867 SWAPGS
13868 /*
13869@@ -355,9 +639,10 @@ ENTRY(save_rest)
13870 movq_cfi r15, R15+16
13871 movq %r11, 8(%rsp) /* return address */
13872 FIXUP_TOP_OF_STACK %r11, 16
13873+ pax_force_retaddr
13874 ret
13875 CFI_ENDPROC
13876-END(save_rest)
13877+ENDPROC(save_rest)
13878
13879 /* save complete stack frame */
13880 .pushsection .kprobes.text, "ax"
13881@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
13882 js 1f /* negative -> in kernel */
13883 SWAPGS
13884 xorl %ebx,%ebx
13885-1: ret
13886+1: pax_force_retaddr_bts
13887+ ret
13888 CFI_ENDPROC
13889-END(save_paranoid)
13890+ENDPROC(save_paranoid)
13891 .popsection
13892
13893 /*
13894@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
13895
13896 RESTORE_REST
13897
13898- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13899+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13900 je int_ret_from_sys_call
13901
13902 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13903@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
13904 jmp ret_from_sys_call # go to the SYSRET fastpath
13905
13906 CFI_ENDPROC
13907-END(ret_from_fork)
13908+ENDPROC(ret_from_fork)
13909
13910 /*
13911 * System call entry. Up to 6 arguments in registers are supported.
13912@@ -456,7 +742,7 @@ END(ret_from_fork)
13913 ENTRY(system_call)
13914 CFI_STARTPROC simple
13915 CFI_SIGNAL_FRAME
13916- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13917+ CFI_DEF_CFA rsp,0
13918 CFI_REGISTER rip,rcx
13919 /*CFI_REGISTER rflags,r11*/
13920 SWAPGS_UNSAFE_STACK
13921@@ -469,12 +755,13 @@ ENTRY(system_call_after_swapgs)
13922
13923 movq %rsp,PER_CPU_VAR(old_rsp)
13924 movq PER_CPU_VAR(kernel_stack),%rsp
13925+ SAVE_ARGS 8*6,0
13926+ pax_enter_kernel_user
13927 /*
13928 * No need to follow this irqs off/on section - it's straight
13929 * and short:
13930 */
13931 ENABLE_INTERRUPTS(CLBR_NONE)
13932- SAVE_ARGS 8,0
13933 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13934 movq %rcx,RIP-ARGOFFSET(%rsp)
13935 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13936@@ -484,7 +771,7 @@ ENTRY(system_call_after_swapgs)
13937 system_call_fastpath:
13938 cmpq $__NR_syscall_max,%rax
13939 ja badsys
13940- movq %r10,%rcx
13941+ movq R10-ARGOFFSET(%rsp),%rcx
13942 call *sys_call_table(,%rax,8) # XXX: rip relative
13943 movq %rax,RAX-ARGOFFSET(%rsp)
13944 /*
13945@@ -503,6 +790,8 @@ sysret_check:
13946 andl %edi,%edx
13947 jnz sysret_careful
13948 CFI_REMEMBER_STATE
13949+ pax_exit_kernel_user
13950+ pax_erase_kstack
13951 /*
13952 * sysretq will re-enable interrupts:
13953 */
13954@@ -554,14 +843,18 @@ badsys:
13955 * jump back to the normal fast path.
13956 */
13957 auditsys:
13958- movq %r10,%r9 /* 6th arg: 4th syscall arg */
13959+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13960 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13961 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13962 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13963 movq %rax,%rsi /* 2nd arg: syscall number */
13964 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13965 call audit_syscall_entry
13966+
13967+ pax_erase_kstack
13968+
13969 LOAD_ARGS 0 /* reload call-clobbered registers */
13970+ pax_set_fptr_mask
13971 jmp system_call_fastpath
13972
13973 /*
13974@@ -591,16 +884,20 @@ tracesys:
13975 FIXUP_TOP_OF_STACK %rdi
13976 movq %rsp,%rdi
13977 call syscall_trace_enter
13978+
13979+ pax_erase_kstack
13980+
13981 /*
13982 * Reload arg registers from stack in case ptrace changed them.
13983 * We don't reload %rax because syscall_trace_enter() returned
13984 * the value it wants us to use in the table lookup.
13985 */
13986 LOAD_ARGS ARGOFFSET, 1
13987+ pax_set_fptr_mask
13988 RESTORE_REST
13989 cmpq $__NR_syscall_max,%rax
13990 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13991- movq %r10,%rcx /* fixup for C */
13992+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13993 call *sys_call_table(,%rax,8)
13994 movq %rax,RAX-ARGOFFSET(%rsp)
13995 /* Use IRET because user could have changed frame */
13996@@ -612,7 +909,7 @@ tracesys:
13997 GLOBAL(int_ret_from_sys_call)
13998 DISABLE_INTERRUPTS(CLBR_NONE)
13999 TRACE_IRQS_OFF
14000- testl $3,CS-ARGOFFSET(%rsp)
14001+ testb $3,CS-ARGOFFSET(%rsp)
14002 je retint_restore_args
14003 movl $_TIF_ALLWORK_MASK,%edi
14004 /* edi: mask to check */
14005@@ -669,7 +966,7 @@ int_restore_rest:
14006 TRACE_IRQS_OFF
14007 jmp int_with_check
14008 CFI_ENDPROC
14009-END(system_call)
14010+ENDPROC(system_call)
14011
14012 /*
14013 * Certain special system calls that need to save a complete full stack frame.
14014@@ -685,7 +982,7 @@ ENTRY(\label)
14015 call \func
14016 jmp ptregscall_common
14017 CFI_ENDPROC
14018-END(\label)
14019+ENDPROC(\label)
14020 .endm
14021
14022 PTREGSCALL stub_clone, sys_clone, %r8
14023@@ -703,9 +1000,10 @@ ENTRY(ptregscall_common)
14024 movq_cfi_restore R12+8, r12
14025 movq_cfi_restore RBP+8, rbp
14026 movq_cfi_restore RBX+8, rbx
14027+ pax_force_retaddr
14028 ret $REST_SKIP /* pop extended registers */
14029 CFI_ENDPROC
14030-END(ptregscall_common)
14031+ENDPROC(ptregscall_common)
14032
14033 ENTRY(stub_execve)
14034 CFI_STARTPROC
14035@@ -720,7 +1018,7 @@ ENTRY(stub_execve)
14036 RESTORE_REST
14037 jmp int_ret_from_sys_call
14038 CFI_ENDPROC
14039-END(stub_execve)
14040+ENDPROC(stub_execve)
14041
14042 /*
14043 * sigreturn is special because it needs to restore all registers on return.
14044@@ -738,7 +1036,7 @@ ENTRY(stub_rt_sigreturn)
14045 RESTORE_REST
14046 jmp int_ret_from_sys_call
14047 CFI_ENDPROC
14048-END(stub_rt_sigreturn)
14049+ENDPROC(stub_rt_sigreturn)
14050
14051 /*
14052 * Build the entry stubs and pointer table with some assembler magic.
14053@@ -773,7 +1071,7 @@ vector=vector+1
14054 2: jmp common_interrupt
14055 .endr
14056 CFI_ENDPROC
14057-END(irq_entries_start)
14058+ENDPROC(irq_entries_start)
14059
14060 .previous
14061 END(interrupt)
14062@@ -793,6 +1091,16 @@ END(interrupt)
14063 subq $ORIG_RAX-RBP, %rsp
14064 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14065 SAVE_ARGS_IRQ
14066+#ifdef CONFIG_PAX_MEMORY_UDEREF
14067+ testb $3, CS(%rdi)
14068+ jnz 1f
14069+ pax_enter_kernel
14070+ jmp 2f
14071+1: pax_enter_kernel_user
14072+2:
14073+#else
14074+ pax_enter_kernel
14075+#endif
14076 call \func
14077 .endm
14078
14079@@ -824,7 +1132,7 @@ ret_from_intr:
14080
14081 exit_intr:
14082 GET_THREAD_INFO(%rcx)
14083- testl $3,CS-ARGOFFSET(%rsp)
14084+ testb $3,CS-ARGOFFSET(%rsp)
14085 je retint_kernel
14086
14087 /* Interrupt came from user space */
14088@@ -846,12 +1154,16 @@ retint_swapgs: /* return to user-space */
14089 * The iretq could re-enable interrupts:
14090 */
14091 DISABLE_INTERRUPTS(CLBR_ANY)
14092+ pax_exit_kernel_user
14093+ pax_erase_kstack
14094 TRACE_IRQS_IRETQ
14095 SWAPGS
14096 jmp restore_args
14097
14098 retint_restore_args: /* return to kernel space */
14099 DISABLE_INTERRUPTS(CLBR_ANY)
14100+ pax_exit_kernel
14101+ pax_force_retaddr RIP-ARGOFFSET
14102 /*
14103 * The iretq could re-enable interrupts:
14104 */
14105@@ -940,7 +1252,7 @@ ENTRY(retint_kernel)
14106 #endif
14107
14108 CFI_ENDPROC
14109-END(common_interrupt)
14110+ENDPROC(common_interrupt)
14111 /*
14112 * End of kprobes section
14113 */
14114@@ -956,7 +1268,7 @@ ENTRY(\sym)
14115 interrupt \do_sym
14116 jmp ret_from_intr
14117 CFI_ENDPROC
14118-END(\sym)
14119+ENDPROC(\sym)
14120 .endm
14121
14122 #ifdef CONFIG_SMP
14123@@ -1021,12 +1333,22 @@ ENTRY(\sym)
14124 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14125 call error_entry
14126 DEFAULT_FRAME 0
14127+#ifdef CONFIG_PAX_MEMORY_UDEREF
14128+ testb $3, CS(%rsp)
14129+ jnz 1f
14130+ pax_enter_kernel
14131+ jmp 2f
14132+1: pax_enter_kernel_user
14133+2:
14134+#else
14135+ pax_enter_kernel
14136+#endif
14137 movq %rsp,%rdi /* pt_regs pointer */
14138 xorl %esi,%esi /* no error code */
14139 call \do_sym
14140 jmp error_exit /* %ebx: no swapgs flag */
14141 CFI_ENDPROC
14142-END(\sym)
14143+ENDPROC(\sym)
14144 .endm
14145
14146 .macro paranoidzeroentry sym do_sym
14147@@ -1038,15 +1360,25 @@ ENTRY(\sym)
14148 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14149 call save_paranoid
14150 TRACE_IRQS_OFF
14151+#ifdef CONFIG_PAX_MEMORY_UDEREF
14152+ testb $3, CS(%rsp)
14153+ jnz 1f
14154+ pax_enter_kernel
14155+ jmp 2f
14156+1: pax_enter_kernel_user
14157+2:
14158+#else
14159+ pax_enter_kernel
14160+#endif
14161 movq %rsp,%rdi /* pt_regs pointer */
14162 xorl %esi,%esi /* no error code */
14163 call \do_sym
14164 jmp paranoid_exit /* %ebx: no swapgs flag */
14165 CFI_ENDPROC
14166-END(\sym)
14167+ENDPROC(\sym)
14168 .endm
14169
14170-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14171+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14172 .macro paranoidzeroentry_ist sym do_sym ist
14173 ENTRY(\sym)
14174 INTR_FRAME
14175@@ -1056,14 +1388,30 @@ ENTRY(\sym)
14176 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14177 call save_paranoid
14178 TRACE_IRQS_OFF
14179+#ifdef CONFIG_PAX_MEMORY_UDEREF
14180+ testb $3, CS(%rsp)
14181+ jnz 1f
14182+ pax_enter_kernel
14183+ jmp 2f
14184+1: pax_enter_kernel_user
14185+2:
14186+#else
14187+ pax_enter_kernel
14188+#endif
14189 movq %rsp,%rdi /* pt_regs pointer */
14190 xorl %esi,%esi /* no error code */
14191+#ifdef CONFIG_SMP
14192+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14193+ lea init_tss(%r12), %r12
14194+#else
14195+ lea init_tss(%rip), %r12
14196+#endif
14197 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14198 call \do_sym
14199 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14200 jmp paranoid_exit /* %ebx: no swapgs flag */
14201 CFI_ENDPROC
14202-END(\sym)
14203+ENDPROC(\sym)
14204 .endm
14205
14206 .macro errorentry sym do_sym
14207@@ -1074,13 +1422,23 @@ ENTRY(\sym)
14208 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14209 call error_entry
14210 DEFAULT_FRAME 0
14211+#ifdef CONFIG_PAX_MEMORY_UDEREF
14212+ testb $3, CS(%rsp)
14213+ jnz 1f
14214+ pax_enter_kernel
14215+ jmp 2f
14216+1: pax_enter_kernel_user
14217+2:
14218+#else
14219+ pax_enter_kernel
14220+#endif
14221 movq %rsp,%rdi /* pt_regs pointer */
14222 movq ORIG_RAX(%rsp),%rsi /* get error code */
14223 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14224 call \do_sym
14225 jmp error_exit /* %ebx: no swapgs flag */
14226 CFI_ENDPROC
14227-END(\sym)
14228+ENDPROC(\sym)
14229 .endm
14230
14231 /* error code is on the stack already */
14232@@ -1093,13 +1451,23 @@ ENTRY(\sym)
14233 call save_paranoid
14234 DEFAULT_FRAME 0
14235 TRACE_IRQS_OFF
14236+#ifdef CONFIG_PAX_MEMORY_UDEREF
14237+ testb $3, CS(%rsp)
14238+ jnz 1f
14239+ pax_enter_kernel
14240+ jmp 2f
14241+1: pax_enter_kernel_user
14242+2:
14243+#else
14244+ pax_enter_kernel
14245+#endif
14246 movq %rsp,%rdi /* pt_regs pointer */
14247 movq ORIG_RAX(%rsp),%rsi /* get error code */
14248 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14249 call \do_sym
14250 jmp paranoid_exit /* %ebx: no swapgs flag */
14251 CFI_ENDPROC
14252-END(\sym)
14253+ENDPROC(\sym)
14254 .endm
14255
14256 zeroentry divide_error do_divide_error
14257@@ -1129,9 +1497,10 @@ gs_change:
14258 2: mfence /* workaround */
14259 SWAPGS
14260 popfq_cfi
14261+ pax_force_retaddr
14262 ret
14263 CFI_ENDPROC
14264-END(native_load_gs_index)
14265+ENDPROC(native_load_gs_index)
14266
14267 .section __ex_table,"a"
14268 .align 8
14269@@ -1153,13 +1522,14 @@ ENTRY(kernel_thread_helper)
14270 * Here we are in the child and the registers are set as they were
14271 * at kernel_thread() invocation in the parent.
14272 */
14273+ pax_force_fptr %rsi
14274 call *%rsi
14275 # exit
14276 mov %eax, %edi
14277 call do_exit
14278 ud2 # padding for call trace
14279 CFI_ENDPROC
14280-END(kernel_thread_helper)
14281+ENDPROC(kernel_thread_helper)
14282
14283 /*
14284 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14285@@ -1186,11 +1556,11 @@ ENTRY(kernel_execve)
14286 RESTORE_REST
14287 testq %rax,%rax
14288 je int_ret_from_sys_call
14289- RESTORE_ARGS
14290 UNFAKE_STACK_FRAME
14291+ pax_force_retaddr
14292 ret
14293 CFI_ENDPROC
14294-END(kernel_execve)
14295+ENDPROC(kernel_execve)
14296
14297 /* Call softirq on interrupt stack. Interrupts are off. */
14298 ENTRY(call_softirq)
14299@@ -1208,9 +1578,10 @@ ENTRY(call_softirq)
14300 CFI_DEF_CFA_REGISTER rsp
14301 CFI_ADJUST_CFA_OFFSET -8
14302 decl PER_CPU_VAR(irq_count)
14303+ pax_force_retaddr
14304 ret
14305 CFI_ENDPROC
14306-END(call_softirq)
14307+ENDPROC(call_softirq)
14308
14309 #ifdef CONFIG_XEN
14310 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14311@@ -1248,7 +1619,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14312 decl PER_CPU_VAR(irq_count)
14313 jmp error_exit
14314 CFI_ENDPROC
14315-END(xen_do_hypervisor_callback)
14316+ENDPROC(xen_do_hypervisor_callback)
14317
14318 /*
14319 * Hypervisor uses this for application faults while it executes.
14320@@ -1307,7 +1678,7 @@ ENTRY(xen_failsafe_callback)
14321 SAVE_ALL
14322 jmp error_exit
14323 CFI_ENDPROC
14324-END(xen_failsafe_callback)
14325+ENDPROC(xen_failsafe_callback)
14326
14327 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14328 xen_hvm_callback_vector xen_evtchn_do_upcall
14329@@ -1356,16 +1727,31 @@ ENTRY(paranoid_exit)
14330 TRACE_IRQS_OFF
14331 testl %ebx,%ebx /* swapgs needed? */
14332 jnz paranoid_restore
14333- testl $3,CS(%rsp)
14334+ testb $3,CS(%rsp)
14335 jnz paranoid_userspace
14336+#ifdef CONFIG_PAX_MEMORY_UDEREF
14337+ pax_exit_kernel
14338+ TRACE_IRQS_IRETQ 0
14339+ SWAPGS_UNSAFE_STACK
14340+ RESTORE_ALL 8
14341+ pax_force_retaddr_bts
14342+ jmp irq_return
14343+#endif
14344 paranoid_swapgs:
14345+#ifdef CONFIG_PAX_MEMORY_UDEREF
14346+ pax_exit_kernel_user
14347+#else
14348+ pax_exit_kernel
14349+#endif
14350 TRACE_IRQS_IRETQ 0
14351 SWAPGS_UNSAFE_STACK
14352 RESTORE_ALL 8
14353 jmp irq_return
14354 paranoid_restore:
14355+ pax_exit_kernel
14356 TRACE_IRQS_IRETQ 0
14357 RESTORE_ALL 8
14358+ pax_force_retaddr_bts
14359 jmp irq_return
14360 paranoid_userspace:
14361 GET_THREAD_INFO(%rcx)
14362@@ -1394,7 +1780,7 @@ paranoid_schedule:
14363 TRACE_IRQS_OFF
14364 jmp paranoid_userspace
14365 CFI_ENDPROC
14366-END(paranoid_exit)
14367+ENDPROC(paranoid_exit)
14368
14369 /*
14370 * Exception entry point. This expects an error code/orig_rax on the stack.
14371@@ -1421,12 +1807,13 @@ ENTRY(error_entry)
14372 movq_cfi r14, R14+8
14373 movq_cfi r15, R15+8
14374 xorl %ebx,%ebx
14375- testl $3,CS+8(%rsp)
14376+ testb $3,CS+8(%rsp)
14377 je error_kernelspace
14378 error_swapgs:
14379 SWAPGS
14380 error_sti:
14381 TRACE_IRQS_OFF
14382+ pax_force_retaddr_bts
14383 ret
14384
14385 /*
14386@@ -1453,7 +1840,7 @@ bstep_iret:
14387 movq %rcx,RIP+8(%rsp)
14388 jmp error_swapgs
14389 CFI_ENDPROC
14390-END(error_entry)
14391+ENDPROC(error_entry)
14392
14393
14394 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14395@@ -1473,7 +1860,7 @@ ENTRY(error_exit)
14396 jnz retint_careful
14397 jmp retint_swapgs
14398 CFI_ENDPROC
14399-END(error_exit)
14400+ENDPROC(error_exit)
14401
14402
14403 /* runs on exception stack */
14404@@ -1485,6 +1872,16 @@ ENTRY(nmi)
14405 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14406 call save_paranoid
14407 DEFAULT_FRAME 0
14408+#ifdef CONFIG_PAX_MEMORY_UDEREF
14409+ testb $3, CS(%rsp)
14410+ jnz 1f
14411+ pax_enter_kernel
14412+ jmp 2f
14413+1: pax_enter_kernel_user
14414+2:
14415+#else
14416+ pax_enter_kernel
14417+#endif
14418 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14419 movq %rsp,%rdi
14420 movq $-1,%rsi
14421@@ -1495,12 +1892,28 @@ ENTRY(nmi)
14422 DISABLE_INTERRUPTS(CLBR_NONE)
14423 testl %ebx,%ebx /* swapgs needed? */
14424 jnz nmi_restore
14425- testl $3,CS(%rsp)
14426+ testb $3,CS(%rsp)
14427 jnz nmi_userspace
14428+#ifdef CONFIG_PAX_MEMORY_UDEREF
14429+ pax_exit_kernel
14430+ SWAPGS_UNSAFE_STACK
14431+ RESTORE_ALL 8
14432+ pax_force_retaddr_bts
14433+ jmp irq_return
14434+#endif
14435 nmi_swapgs:
14436+#ifdef CONFIG_PAX_MEMORY_UDEREF
14437+ pax_exit_kernel_user
14438+#else
14439+ pax_exit_kernel
14440+#endif
14441 SWAPGS_UNSAFE_STACK
14442+ RESTORE_ALL 8
14443+ jmp irq_return
14444 nmi_restore:
14445+ pax_exit_kernel
14446 RESTORE_ALL 8
14447+ pax_force_retaddr_bts
14448 jmp irq_return
14449 nmi_userspace:
14450 GET_THREAD_INFO(%rcx)
14451@@ -1529,14 +1942,14 @@ nmi_schedule:
14452 jmp paranoid_exit
14453 CFI_ENDPROC
14454 #endif
14455-END(nmi)
14456+ENDPROC(nmi)
14457
14458 ENTRY(ignore_sysret)
14459 CFI_STARTPROC
14460 mov $-ENOSYS,%eax
14461 sysret
14462 CFI_ENDPROC
14463-END(ignore_sysret)
14464+ENDPROC(ignore_sysret)
14465
14466 /*
14467 * End of kprobes section
14468diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14469index c9a281f..ce2f317 100644
14470--- a/arch/x86/kernel/ftrace.c
14471+++ b/arch/x86/kernel/ftrace.c
14472@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14473 static const void *mod_code_newcode; /* holds the text to write to the IP */
14474
14475 static unsigned nmi_wait_count;
14476-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14477+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14478
14479 int ftrace_arch_read_dyn_info(char *buf, int size)
14480 {
14481@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14482
14483 r = snprintf(buf, size, "%u %u",
14484 nmi_wait_count,
14485- atomic_read(&nmi_update_count));
14486+ atomic_read_unchecked(&nmi_update_count));
14487 return r;
14488 }
14489
14490@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14491
14492 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14493 smp_rmb();
14494+ pax_open_kernel();
14495 ftrace_mod_code();
14496- atomic_inc(&nmi_update_count);
14497+ pax_close_kernel();
14498+ atomic_inc_unchecked(&nmi_update_count);
14499 }
14500 /* Must have previous changes seen before executions */
14501 smp_mb();
14502@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14503 {
14504 unsigned char replaced[MCOUNT_INSN_SIZE];
14505
14506+ ip = ktla_ktva(ip);
14507+
14508 /*
14509 * Note: Due to modules and __init, code can
14510 * disappear and change, we need to protect against faulting
14511@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14512 unsigned char old[MCOUNT_INSN_SIZE], *new;
14513 int ret;
14514
14515- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14516+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14517 new = ftrace_call_replace(ip, (unsigned long)func);
14518 ret = ftrace_modify_code(ip, old, new);
14519
14520@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14521 {
14522 unsigned char code[MCOUNT_INSN_SIZE];
14523
14524+ ip = ktla_ktva(ip);
14525+
14526 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14527 return -EFAULT;
14528
14529diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14530index 3bb0850..55a56f4 100644
14531--- a/arch/x86/kernel/head32.c
14532+++ b/arch/x86/kernel/head32.c
14533@@ -19,6 +19,7 @@
14534 #include <asm/io_apic.h>
14535 #include <asm/bios_ebda.h>
14536 #include <asm/tlbflush.h>
14537+#include <asm/boot.h>
14538
14539 static void __init i386_default_early_setup(void)
14540 {
14541@@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14542 {
14543 memblock_init();
14544
14545- memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14546+ memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14547
14548 #ifdef CONFIG_BLK_DEV_INITRD
14549 /* Reserve INITRD */
14550diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14551index ce0be7c..c41476e 100644
14552--- a/arch/x86/kernel/head_32.S
14553+++ b/arch/x86/kernel/head_32.S
14554@@ -25,6 +25,12 @@
14555 /* Physical address */
14556 #define pa(X) ((X) - __PAGE_OFFSET)
14557
14558+#ifdef CONFIG_PAX_KERNEXEC
14559+#define ta(X) (X)
14560+#else
14561+#define ta(X) ((X) - __PAGE_OFFSET)
14562+#endif
14563+
14564 /*
14565 * References to members of the new_cpu_data structure.
14566 */
14567@@ -54,11 +60,7 @@
14568 * and small than max_low_pfn, otherwise will waste some page table entries
14569 */
14570
14571-#if PTRS_PER_PMD > 1
14572-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14573-#else
14574-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14575-#endif
14576+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14577
14578 /* Number of possible pages in the lowmem region */
14579 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14580@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14581 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14582
14583 /*
14584+ * Real beginning of normal "text" segment
14585+ */
14586+ENTRY(stext)
14587+ENTRY(_stext)
14588+
14589+/*
14590 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14591 * %esi points to the real-mode code as a 32-bit pointer.
14592 * CS and DS must be 4 GB flat segments, but we don't depend on
14593@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14594 * can.
14595 */
14596 __HEAD
14597+
14598+#ifdef CONFIG_PAX_KERNEXEC
14599+ jmp startup_32
14600+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14601+.fill PAGE_SIZE-5,1,0xcc
14602+#endif
14603+
14604 ENTRY(startup_32)
14605 movl pa(stack_start),%ecx
14606
14607@@ -105,6 +120,57 @@ ENTRY(startup_32)
14608 2:
14609 leal -__PAGE_OFFSET(%ecx),%esp
14610
14611+#ifdef CONFIG_SMP
14612+ movl $pa(cpu_gdt_table),%edi
14613+ movl $__per_cpu_load,%eax
14614+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14615+ rorl $16,%eax
14616+ movb %al,__KERNEL_PERCPU + 4(%edi)
14617+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14618+ movl $__per_cpu_end - 1,%eax
14619+ subl $__per_cpu_start,%eax
14620+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14621+#endif
14622+
14623+#ifdef CONFIG_PAX_MEMORY_UDEREF
14624+ movl $NR_CPUS,%ecx
14625+ movl $pa(cpu_gdt_table),%edi
14626+1:
14627+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14628+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14629+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14630+ addl $PAGE_SIZE_asm,%edi
14631+ loop 1b
14632+#endif
14633+
14634+#ifdef CONFIG_PAX_KERNEXEC
14635+ movl $pa(boot_gdt),%edi
14636+ movl $__LOAD_PHYSICAL_ADDR,%eax
14637+ movw %ax,__BOOT_CS + 2(%edi)
14638+ rorl $16,%eax
14639+ movb %al,__BOOT_CS + 4(%edi)
14640+ movb %ah,__BOOT_CS + 7(%edi)
14641+ rorl $16,%eax
14642+
14643+ ljmp $(__BOOT_CS),$1f
14644+1:
14645+
14646+ movl $NR_CPUS,%ecx
14647+ movl $pa(cpu_gdt_table),%edi
14648+ addl $__PAGE_OFFSET,%eax
14649+1:
14650+ movw %ax,__KERNEL_CS + 2(%edi)
14651+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14652+ rorl $16,%eax
14653+ movb %al,__KERNEL_CS + 4(%edi)
14654+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14655+ movb %ah,__KERNEL_CS + 7(%edi)
14656+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14657+ rorl $16,%eax
14658+ addl $PAGE_SIZE_asm,%edi
14659+ loop 1b
14660+#endif
14661+
14662 /*
14663 * Clear BSS first so that there are no surprises...
14664 */
14665@@ -195,8 +261,11 @@ ENTRY(startup_32)
14666 movl %eax, pa(max_pfn_mapped)
14667
14668 /* Do early initialization of the fixmap area */
14669- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14670- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14671+#ifdef CONFIG_COMPAT_VDSO
14672+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14673+#else
14674+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14675+#endif
14676 #else /* Not PAE */
14677
14678 page_pde_offset = (__PAGE_OFFSET >> 20);
14679@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14680 movl %eax, pa(max_pfn_mapped)
14681
14682 /* Do early initialization of the fixmap area */
14683- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14684- movl %eax,pa(initial_page_table+0xffc)
14685+#ifdef CONFIG_COMPAT_VDSO
14686+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14687+#else
14688+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14689+#endif
14690 #endif
14691
14692 #ifdef CONFIG_PARAVIRT
14693@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14694 cmpl $num_subarch_entries, %eax
14695 jae bad_subarch
14696
14697- movl pa(subarch_entries)(,%eax,4), %eax
14698- subl $__PAGE_OFFSET, %eax
14699- jmp *%eax
14700+ jmp *pa(subarch_entries)(,%eax,4)
14701
14702 bad_subarch:
14703 WEAK(lguest_entry)
14704@@ -255,10 +325,10 @@ WEAK(xen_entry)
14705 __INITDATA
14706
14707 subarch_entries:
14708- .long default_entry /* normal x86/PC */
14709- .long lguest_entry /* lguest hypervisor */
14710- .long xen_entry /* Xen hypervisor */
14711- .long default_entry /* Moorestown MID */
14712+ .long ta(default_entry) /* normal x86/PC */
14713+ .long ta(lguest_entry) /* lguest hypervisor */
14714+ .long ta(xen_entry) /* Xen hypervisor */
14715+ .long ta(default_entry) /* Moorestown MID */
14716 num_subarch_entries = (. - subarch_entries) / 4
14717 .previous
14718 #else
14719@@ -312,6 +382,7 @@ default_entry:
14720 orl %edx,%eax
14721 movl %eax,%cr4
14722
14723+#ifdef CONFIG_X86_PAE
14724 testb $X86_CR4_PAE, %al # check if PAE is enabled
14725 jz 6f
14726
14727@@ -340,6 +411,9 @@ default_entry:
14728 /* Make changes effective */
14729 wrmsr
14730
14731+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14732+#endif
14733+
14734 6:
14735
14736 /*
14737@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14738 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14739 movl %eax,%ss # after changing gdt.
14740
14741- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14742+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14743 movl %eax,%ds
14744 movl %eax,%es
14745
14746@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14747 */
14748 cmpb $0,ready
14749 jne 1f
14750- movl $gdt_page,%eax
14751+ movl $cpu_gdt_table,%eax
14752 movl $stack_canary,%ecx
14753+#ifdef CONFIG_SMP
14754+ addl $__per_cpu_load,%ecx
14755+#endif
14756 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14757 shrl $16, %ecx
14758 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14759 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14760 1:
14761-#endif
14762 movl $(__KERNEL_STACK_CANARY),%eax
14763+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14764+ movl $(__USER_DS),%eax
14765+#else
14766+ xorl %eax,%eax
14767+#endif
14768 movl %eax,%gs
14769
14770 xorl %eax,%eax # Clear LDT
14771@@ -558,22 +639,22 @@ early_page_fault:
14772 jmp early_fault
14773
14774 early_fault:
14775- cld
14776 #ifdef CONFIG_PRINTK
14777+ cmpl $1,%ss:early_recursion_flag
14778+ je hlt_loop
14779+ incl %ss:early_recursion_flag
14780+ cld
14781 pusha
14782 movl $(__KERNEL_DS),%eax
14783 movl %eax,%ds
14784 movl %eax,%es
14785- cmpl $2,early_recursion_flag
14786- je hlt_loop
14787- incl early_recursion_flag
14788 movl %cr2,%eax
14789 pushl %eax
14790 pushl %edx /* trapno */
14791 pushl $fault_msg
14792 call printk
14793+; call dump_stack
14794 #endif
14795- call dump_stack
14796 hlt_loop:
14797 hlt
14798 jmp hlt_loop
14799@@ -581,8 +662,11 @@ hlt_loop:
14800 /* This is the default interrupt "handler" :-) */
14801 ALIGN
14802 ignore_int:
14803- cld
14804 #ifdef CONFIG_PRINTK
14805+ cmpl $2,%ss:early_recursion_flag
14806+ je hlt_loop
14807+ incl %ss:early_recursion_flag
14808+ cld
14809 pushl %eax
14810 pushl %ecx
14811 pushl %edx
14812@@ -591,9 +675,6 @@ ignore_int:
14813 movl $(__KERNEL_DS),%eax
14814 movl %eax,%ds
14815 movl %eax,%es
14816- cmpl $2,early_recursion_flag
14817- je hlt_loop
14818- incl early_recursion_flag
14819 pushl 16(%esp)
14820 pushl 24(%esp)
14821 pushl 32(%esp)
14822@@ -622,29 +703,43 @@ ENTRY(initial_code)
14823 /*
14824 * BSS section
14825 */
14826-__PAGE_ALIGNED_BSS
14827- .align PAGE_SIZE
14828 #ifdef CONFIG_X86_PAE
14829+.section .initial_pg_pmd,"a",@progbits
14830 initial_pg_pmd:
14831 .fill 1024*KPMDS,4,0
14832 #else
14833+.section .initial_page_table,"a",@progbits
14834 ENTRY(initial_page_table)
14835 .fill 1024,4,0
14836 #endif
14837+.section .initial_pg_fixmap,"a",@progbits
14838 initial_pg_fixmap:
14839 .fill 1024,4,0
14840+.section .empty_zero_page,"a",@progbits
14841 ENTRY(empty_zero_page)
14842 .fill 4096,1,0
14843+.section .swapper_pg_dir,"a",@progbits
14844 ENTRY(swapper_pg_dir)
14845+#ifdef CONFIG_X86_PAE
14846+ .fill 4,8,0
14847+#else
14848 .fill 1024,4,0
14849+#endif
14850+
14851+/*
14852+ * The IDT has to be page-aligned to simplify the Pentium
14853+ * F0 0F bug workaround.. We have a special link segment
14854+ * for this.
14855+ */
14856+.section .idt,"a",@progbits
14857+ENTRY(idt_table)
14858+ .fill 256,8,0
14859
14860 /*
14861 * This starts the data section.
14862 */
14863 #ifdef CONFIG_X86_PAE
14864-__PAGE_ALIGNED_DATA
14865- /* Page-aligned for the benefit of paravirt? */
14866- .align PAGE_SIZE
14867+.section .initial_page_table,"a",@progbits
14868 ENTRY(initial_page_table)
14869 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14870 # if KPMDS == 3
14871@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14872 # error "Kernel PMDs should be 1, 2 or 3"
14873 # endif
14874 .align PAGE_SIZE /* needs to be page-sized too */
14875+
14876+#ifdef CONFIG_PAX_PER_CPU_PGD
14877+ENTRY(cpu_pgd)
14878+ .rept NR_CPUS
14879+ .fill 4,8,0
14880+ .endr
14881+#endif
14882+
14883 #endif
14884
14885 .data
14886 .balign 4
14887 ENTRY(stack_start)
14888- .long init_thread_union+THREAD_SIZE
14889+ .long init_thread_union+THREAD_SIZE-8
14890
14891+ready: .byte 0
14892+
14893+.section .rodata,"a",@progbits
14894 early_recursion_flag:
14895 .long 0
14896
14897-ready: .byte 0
14898-
14899 int_msg:
14900 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14901
14902@@ -707,7 +811,7 @@ fault_msg:
14903 .word 0 # 32 bit align gdt_desc.address
14904 boot_gdt_descr:
14905 .word __BOOT_DS+7
14906- .long boot_gdt - __PAGE_OFFSET
14907+ .long pa(boot_gdt)
14908
14909 .word 0 # 32-bit align idt_desc.address
14910 idt_descr:
14911@@ -718,7 +822,7 @@ idt_descr:
14912 .word 0 # 32 bit align gdt_desc.address
14913 ENTRY(early_gdt_descr)
14914 .word GDT_ENTRIES*8-1
14915- .long gdt_page /* Overwritten for secondary CPUs */
14916+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14917
14918 /*
14919 * The boot_gdt must mirror the equivalent in setup.S and is
14920@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14921 .align L1_CACHE_BYTES
14922 ENTRY(boot_gdt)
14923 .fill GDT_ENTRY_BOOT_CS,8,0
14924- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14925- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14926+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14927+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14928+
14929+ .align PAGE_SIZE_asm
14930+ENTRY(cpu_gdt_table)
14931+ .rept NR_CPUS
14932+ .quad 0x0000000000000000 /* NULL descriptor */
14933+ .quad 0x0000000000000000 /* 0x0b reserved */
14934+ .quad 0x0000000000000000 /* 0x13 reserved */
14935+ .quad 0x0000000000000000 /* 0x1b reserved */
14936+
14937+#ifdef CONFIG_PAX_KERNEXEC
14938+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14939+#else
14940+ .quad 0x0000000000000000 /* 0x20 unused */
14941+#endif
14942+
14943+ .quad 0x0000000000000000 /* 0x28 unused */
14944+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14945+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14946+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14947+ .quad 0x0000000000000000 /* 0x4b reserved */
14948+ .quad 0x0000000000000000 /* 0x53 reserved */
14949+ .quad 0x0000000000000000 /* 0x5b reserved */
14950+
14951+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14952+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14953+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14954+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14955+
14956+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14957+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14958+
14959+ /*
14960+ * Segments used for calling PnP BIOS have byte granularity.
14961+ * The code segments and data segments have fixed 64k limits,
14962+ * the transfer segment sizes are set at run time.
14963+ */
14964+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14965+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14966+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14967+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14968+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14969+
14970+ /*
14971+ * The APM segments have byte granularity and their bases
14972+ * are set at run time. All have 64k limits.
14973+ */
14974+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14975+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14976+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14977+
14978+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14979+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14980+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14981+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14982+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14983+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14984+
14985+ /* Be sure this is zeroed to avoid false validations in Xen */
14986+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14987+ .endr
14988diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14989index e11e394..9aebc5d 100644
14990--- a/arch/x86/kernel/head_64.S
14991+++ b/arch/x86/kernel/head_64.S
14992@@ -19,6 +19,8 @@
14993 #include <asm/cache.h>
14994 #include <asm/processor-flags.h>
14995 #include <asm/percpu.h>
14996+#include <asm/cpufeature.h>
14997+#include <asm/alternative-asm.h>
14998
14999 #ifdef CONFIG_PARAVIRT
15000 #include <asm/asm-offsets.h>
15001@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
15002 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
15003 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
15004 L3_START_KERNEL = pud_index(__START_KERNEL_map)
15005+L4_VMALLOC_START = pgd_index(VMALLOC_START)
15006+L3_VMALLOC_START = pud_index(VMALLOC_START)
15007+L4_VMALLOC_END = pgd_index(VMALLOC_END)
15008+L3_VMALLOC_END = pud_index(VMALLOC_END)
15009+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
15010+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
15011
15012 .text
15013 __HEAD
15014@@ -85,35 +93,23 @@ startup_64:
15015 */
15016 addq %rbp, init_level4_pgt + 0(%rip)
15017 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
15018+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
15019+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
15020+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
15021 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
15022
15023 addq %rbp, level3_ident_pgt + 0(%rip)
15024+#ifndef CONFIG_XEN
15025+ addq %rbp, level3_ident_pgt + 8(%rip)
15026+#endif
15027
15028- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
15029- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
15030+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
15031+
15032+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
15033+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
15034
15035 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
15036-
15037- /* Add an Identity mapping if I am above 1G */
15038- leaq _text(%rip), %rdi
15039- andq $PMD_PAGE_MASK, %rdi
15040-
15041- movq %rdi, %rax
15042- shrq $PUD_SHIFT, %rax
15043- andq $(PTRS_PER_PUD - 1), %rax
15044- jz ident_complete
15045-
15046- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
15047- leaq level3_ident_pgt(%rip), %rbx
15048- movq %rdx, 0(%rbx, %rax, 8)
15049-
15050- movq %rdi, %rax
15051- shrq $PMD_SHIFT, %rax
15052- andq $(PTRS_PER_PMD - 1), %rax
15053- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
15054- leaq level2_spare_pgt(%rip), %rbx
15055- movq %rdx, 0(%rbx, %rax, 8)
15056-ident_complete:
15057+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
15058
15059 /*
15060 * Fixup the kernel text+data virtual addresses. Note that
15061@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15062 * after the boot processor executes this code.
15063 */
15064
15065- /* Enable PAE mode and PGE */
15066- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15067+ /* Enable PAE mode and PSE/PGE */
15068+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15069 movq %rax, %cr4
15070
15071 /* Setup early boot stage 4 level pagetables. */
15072@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15073 movl $MSR_EFER, %ecx
15074 rdmsr
15075 btsl $_EFER_SCE, %eax /* Enable System Call */
15076- btl $20,%edi /* No Execute supported? */
15077+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15078 jnc 1f
15079 btsl $_EFER_NX, %eax
15080+ leaq init_level4_pgt(%rip), %rdi
15081+#ifndef CONFIG_EFI
15082+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15083+#endif
15084+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15085+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15086+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15087+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15088 1: wrmsr /* Make changes effective */
15089
15090 /* Setup cr0 */
15091@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15092 * jump. In addition we need to ensure %cs is set so we make this
15093 * a far return.
15094 */
15095+ pax_set_fptr_mask
15096 movq initial_code(%rip),%rax
15097 pushq $0 # fake return address to stop unwinder
15098 pushq $__KERNEL_CS # set correct cs
15099@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15100 bad_address:
15101 jmp bad_address
15102
15103- .section ".init.text","ax"
15104+ __INIT
15105 #ifdef CONFIG_EARLY_PRINTK
15106 .globl early_idt_handlers
15107 early_idt_handlers:
15108@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15109 #endif /* EARLY_PRINTK */
15110 1: hlt
15111 jmp 1b
15112+ .previous
15113
15114 #ifdef CONFIG_EARLY_PRINTK
15115+ __INITDATA
15116 early_recursion_flag:
15117 .long 0
15118+ .previous
15119
15120+ .section .rodata,"a",@progbits
15121 early_idt_msg:
15122 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15123 early_idt_ripmsg:
15124 .asciz "RIP %s\n"
15125+ .previous
15126 #endif /* CONFIG_EARLY_PRINTK */
15127- .previous
15128
15129+ .section .rodata,"a",@progbits
15130 #define NEXT_PAGE(name) \
15131 .balign PAGE_SIZE; \
15132 ENTRY(name)
15133@@ -338,7 +348,6 @@ ENTRY(name)
15134 i = i + 1 ; \
15135 .endr
15136
15137- .data
15138 /*
15139 * This default setting generates an ident mapping at address 0x100000
15140 * and a mapping for the kernel that precisely maps virtual address
15141@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15142 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15143 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15144 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15145+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
15146+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15147+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
15148+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15149+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15150+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15151 .org init_level4_pgt + L4_START_KERNEL*8, 0
15152 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15153 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15154
15155+#ifdef CONFIG_PAX_PER_CPU_PGD
15156+NEXT_PAGE(cpu_pgd)
15157+ .rept NR_CPUS
15158+ .fill 512,8,0
15159+ .endr
15160+#endif
15161+
15162 NEXT_PAGE(level3_ident_pgt)
15163 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15164+#ifdef CONFIG_XEN
15165 .fill 511,8,0
15166+#else
15167+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15168+ .fill 510,8,0
15169+#endif
15170+
15171+NEXT_PAGE(level3_vmalloc_start_pgt)
15172+ .fill 512,8,0
15173+
15174+NEXT_PAGE(level3_vmalloc_end_pgt)
15175+ .fill 512,8,0
15176+
15177+NEXT_PAGE(level3_vmemmap_pgt)
15178+ .fill L3_VMEMMAP_START,8,0
15179+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15180
15181 NEXT_PAGE(level3_kernel_pgt)
15182 .fill L3_START_KERNEL,8,0
15183@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15184 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15185 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15186
15187+NEXT_PAGE(level2_vmemmap_pgt)
15188+ .fill 512,8,0
15189+
15190 NEXT_PAGE(level2_fixmap_pgt)
15191- .fill 506,8,0
15192- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15193- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15194- .fill 5,8,0
15195+ .fill 507,8,0
15196+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15197+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15198+ .fill 4,8,0
15199
15200-NEXT_PAGE(level1_fixmap_pgt)
15201+NEXT_PAGE(level1_vsyscall_pgt)
15202 .fill 512,8,0
15203
15204-NEXT_PAGE(level2_ident_pgt)
15205- /* Since I easily can, map the first 1G.
15206+ /* Since I easily can, map the first 2G.
15207 * Don't set NX because code runs from these pages.
15208 */
15209- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15210+NEXT_PAGE(level2_ident_pgt)
15211+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15212
15213 NEXT_PAGE(level2_kernel_pgt)
15214 /*
15215@@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15216 * If you want to increase this then increase MODULES_VADDR
15217 * too.)
15218 */
15219- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15220- KERNEL_IMAGE_SIZE/PMD_SIZE)
15221-
15222-NEXT_PAGE(level2_spare_pgt)
15223- .fill 512, 8, 0
15224+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15225
15226 #undef PMDS
15227 #undef NEXT_PAGE
15228
15229- .data
15230+ .align PAGE_SIZE
15231+ENTRY(cpu_gdt_table)
15232+ .rept NR_CPUS
15233+ .quad 0x0000000000000000 /* NULL descriptor */
15234+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15235+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15236+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15237+ .quad 0x00cffb000000ffff /* __USER32_CS */
15238+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15239+ .quad 0x00affb000000ffff /* __USER_CS */
15240+
15241+#ifdef CONFIG_PAX_KERNEXEC
15242+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15243+#else
15244+ .quad 0x0 /* unused */
15245+#endif
15246+
15247+ .quad 0,0 /* TSS */
15248+ .quad 0,0 /* LDT */
15249+ .quad 0,0,0 /* three TLS descriptors */
15250+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15251+ /* asm/segment.h:GDT_ENTRIES must match this */
15252+
15253+ /* zero the remaining page */
15254+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15255+ .endr
15256+
15257 .align 16
15258 .globl early_gdt_descr
15259 early_gdt_descr:
15260 .word GDT_ENTRIES*8-1
15261 early_gdt_descr_base:
15262- .quad INIT_PER_CPU_VAR(gdt_page)
15263+ .quad cpu_gdt_table
15264
15265 ENTRY(phys_base)
15266 /* This must match the first entry in level2_kernel_pgt */
15267 .quad 0x0000000000000000
15268
15269 #include "../../x86/xen/xen-head.S"
15270-
15271- .section .bss, "aw", @nobits
15272+
15273+ .section .rodata,"a",@progbits
15274 .align L1_CACHE_BYTES
15275 ENTRY(idt_table)
15276- .skip IDT_ENTRIES * 16
15277+ .fill 512,8,0
15278
15279 __PAGE_ALIGNED_BSS
15280 .align PAGE_SIZE
15281diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15282index 9c3bd4a..e1d9b35 100644
15283--- a/arch/x86/kernel/i386_ksyms_32.c
15284+++ b/arch/x86/kernel/i386_ksyms_32.c
15285@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15286 EXPORT_SYMBOL(cmpxchg8b_emu);
15287 #endif
15288
15289+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15290+
15291 /* Networking helper routines. */
15292 EXPORT_SYMBOL(csum_partial_copy_generic);
15293+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15294+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15295
15296 EXPORT_SYMBOL(__get_user_1);
15297 EXPORT_SYMBOL(__get_user_2);
15298@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15299
15300 EXPORT_SYMBOL(csum_partial);
15301 EXPORT_SYMBOL(empty_zero_page);
15302+
15303+#ifdef CONFIG_PAX_KERNEXEC
15304+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15305+#endif
15306diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15307index 6104852..6114160 100644
15308--- a/arch/x86/kernel/i8259.c
15309+++ b/arch/x86/kernel/i8259.c
15310@@ -210,7 +210,7 @@ spurious_8259A_irq:
15311 "spurious 8259A interrupt: IRQ%d.\n", irq);
15312 spurious_irq_mask |= irqmask;
15313 }
15314- atomic_inc(&irq_err_count);
15315+ atomic_inc_unchecked(&irq_err_count);
15316 /*
15317 * Theoretically we do not have to handle this IRQ,
15318 * but in Linux this does not cause problems and is
15319diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15320index 43e9ccf..44ccf6f 100644
15321--- a/arch/x86/kernel/init_task.c
15322+++ b/arch/x86/kernel/init_task.c
15323@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15324 * way process stacks are handled. This is done by having a special
15325 * "init_task" linker map entry..
15326 */
15327-union thread_union init_thread_union __init_task_data =
15328- { INIT_THREAD_INFO(init_task) };
15329+union thread_union init_thread_union __init_task_data;
15330
15331 /*
15332 * Initial task structure.
15333@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15334 * section. Since TSS's are completely CPU-local, we want them
15335 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15336 */
15337-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15338-
15339+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15340+EXPORT_SYMBOL(init_tss);
15341diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15342index 8c96897..be66bfa 100644
15343--- a/arch/x86/kernel/ioport.c
15344+++ b/arch/x86/kernel/ioport.c
15345@@ -6,6 +6,7 @@
15346 #include <linux/sched.h>
15347 #include <linux/kernel.h>
15348 #include <linux/capability.h>
15349+#include <linux/security.h>
15350 #include <linux/errno.h>
15351 #include <linux/types.h>
15352 #include <linux/ioport.h>
15353@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15354
15355 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15356 return -EINVAL;
15357+#ifdef CONFIG_GRKERNSEC_IO
15358+ if (turn_on && grsec_disable_privio) {
15359+ gr_handle_ioperm();
15360+ return -EPERM;
15361+ }
15362+#endif
15363 if (turn_on && !capable(CAP_SYS_RAWIO))
15364 return -EPERM;
15365
15366@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15367 * because the ->io_bitmap_max value must match the bitmap
15368 * contents:
15369 */
15370- tss = &per_cpu(init_tss, get_cpu());
15371+ tss = init_tss + get_cpu();
15372
15373 if (turn_on)
15374 bitmap_clear(t->io_bitmap_ptr, from, num);
15375@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15376 return -EINVAL;
15377 /* Trying to gain more privileges? */
15378 if (level > old) {
15379+#ifdef CONFIG_GRKERNSEC_IO
15380+ if (grsec_disable_privio) {
15381+ gr_handle_iopl();
15382+ return -EPERM;
15383+ }
15384+#endif
15385 if (!capable(CAP_SYS_RAWIO))
15386 return -EPERM;
15387 }
15388diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15389index 429e0c9..17b3ece 100644
15390--- a/arch/x86/kernel/irq.c
15391+++ b/arch/x86/kernel/irq.c
15392@@ -18,7 +18,7 @@
15393 #include <asm/mce.h>
15394 #include <asm/hw_irq.h>
15395
15396-atomic_t irq_err_count;
15397+atomic_unchecked_t irq_err_count;
15398
15399 /* Function pointer for generic interrupt vector handling */
15400 void (*x86_platform_ipi_callback)(void) = NULL;
15401@@ -117,9 +117,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15402 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15403 seq_printf(p, " Machine check polls\n");
15404 #endif
15405- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15406+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15407 #if defined(CONFIG_X86_IO_APIC)
15408- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15409+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15410 #endif
15411 return 0;
15412 }
15413@@ -159,10 +159,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15414
15415 u64 arch_irq_stat(void)
15416 {
15417- u64 sum = atomic_read(&irq_err_count);
15418+ u64 sum = atomic_read_unchecked(&irq_err_count);
15419
15420 #ifdef CONFIG_X86_IO_APIC
15421- sum += atomic_read(&irq_mis_count);
15422+ sum += atomic_read_unchecked(&irq_mis_count);
15423 #endif
15424 return sum;
15425 }
15426diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15427index 7209070..cbcd71a 100644
15428--- a/arch/x86/kernel/irq_32.c
15429+++ b/arch/x86/kernel/irq_32.c
15430@@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15431 __asm__ __volatile__("andl %%esp,%0" :
15432 "=r" (sp) : "0" (THREAD_SIZE - 1));
15433
15434- return sp < (sizeof(struct thread_info) + STACK_WARN);
15435+ return sp < STACK_WARN;
15436 }
15437
15438 static void print_stack_overflow(void)
15439@@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15440 * per-CPU IRQ handling contexts (thread information and stack)
15441 */
15442 union irq_ctx {
15443- struct thread_info tinfo;
15444- u32 stack[THREAD_SIZE/sizeof(u32)];
15445+ unsigned long previous_esp;
15446+ u32 stack[THREAD_SIZE/sizeof(u32)];
15447 } __attribute__((aligned(THREAD_SIZE)));
15448
15449 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15450@@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15451 static inline int
15452 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15453 {
15454- union irq_ctx *curctx, *irqctx;
15455+ union irq_ctx *irqctx;
15456 u32 *isp, arg1, arg2;
15457
15458- curctx = (union irq_ctx *) current_thread_info();
15459 irqctx = __this_cpu_read(hardirq_ctx);
15460
15461 /*
15462@@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15463 * handler) we can't do that and just have to keep using the
15464 * current stack (which is the irq stack already after all)
15465 */
15466- if (unlikely(curctx == irqctx))
15467+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15468 return 0;
15469
15470 /* build the stack frame on the IRQ stack */
15471- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15472- irqctx->tinfo.task = curctx->tinfo.task;
15473- irqctx->tinfo.previous_esp = current_stack_pointer;
15474+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15475+ irqctx->previous_esp = current_stack_pointer;
15476
15477- /*
15478- * Copy the softirq bits in preempt_count so that the
15479- * softirq checks work in the hardirq context.
15480- */
15481- irqctx->tinfo.preempt_count =
15482- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15483- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15484+#ifdef CONFIG_PAX_MEMORY_UDEREF
15485+ __set_fs(MAKE_MM_SEG(0));
15486+#endif
15487
15488 if (unlikely(overflow))
15489 call_on_stack(print_stack_overflow, isp);
15490@@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15491 : "0" (irq), "1" (desc), "2" (isp),
15492 "D" (desc->handle_irq)
15493 : "memory", "cc", "ecx");
15494+
15495+#ifdef CONFIG_PAX_MEMORY_UDEREF
15496+ __set_fs(current_thread_info()->addr_limit);
15497+#endif
15498+
15499 return 1;
15500 }
15501
15502@@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15503 */
15504 void __cpuinit irq_ctx_init(int cpu)
15505 {
15506- union irq_ctx *irqctx;
15507-
15508 if (per_cpu(hardirq_ctx, cpu))
15509 return;
15510
15511- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15512- THREAD_FLAGS,
15513- THREAD_ORDER));
15514- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15515- irqctx->tinfo.cpu = cpu;
15516- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15517- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15518-
15519- per_cpu(hardirq_ctx, cpu) = irqctx;
15520-
15521- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15522- THREAD_FLAGS,
15523- THREAD_ORDER));
15524- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15525- irqctx->tinfo.cpu = cpu;
15526- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15527-
15528- per_cpu(softirq_ctx, cpu) = irqctx;
15529+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15530+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15531
15532 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15533 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15534@@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15535 asmlinkage void do_softirq(void)
15536 {
15537 unsigned long flags;
15538- struct thread_info *curctx;
15539 union irq_ctx *irqctx;
15540 u32 *isp;
15541
15542@@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15543 local_irq_save(flags);
15544
15545 if (local_softirq_pending()) {
15546- curctx = current_thread_info();
15547 irqctx = __this_cpu_read(softirq_ctx);
15548- irqctx->tinfo.task = curctx->task;
15549- irqctx->tinfo.previous_esp = current_stack_pointer;
15550+ irqctx->previous_esp = current_stack_pointer;
15551
15552 /* build the stack frame on the softirq stack */
15553- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15554+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15555+
15556+#ifdef CONFIG_PAX_MEMORY_UDEREF
15557+ __set_fs(MAKE_MM_SEG(0));
15558+#endif
15559
15560 call_on_stack(__do_softirq, isp);
15561+
15562+#ifdef CONFIG_PAX_MEMORY_UDEREF
15563+ __set_fs(current_thread_info()->addr_limit);
15564+#endif
15565+
15566 /*
15567 * Shouldn't happen, we returned above if in_interrupt():
15568 */
15569diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
15570index 69bca46..0bac999 100644
15571--- a/arch/x86/kernel/irq_64.c
15572+++ b/arch/x86/kernel/irq_64.c
15573@@ -38,7 +38,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
15574 #ifdef CONFIG_DEBUG_STACKOVERFLOW
15575 u64 curbase = (u64)task_stack_page(current);
15576
15577- if (user_mode_vm(regs))
15578+ if (user_mode(regs))
15579 return;
15580
15581 WARN_ONCE(regs->sp >= curbase &&
15582diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15583index faba577..93b9e71 100644
15584--- a/arch/x86/kernel/kgdb.c
15585+++ b/arch/x86/kernel/kgdb.c
15586@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15587 #ifdef CONFIG_X86_32
15588 switch (regno) {
15589 case GDB_SS:
15590- if (!user_mode_vm(regs))
15591+ if (!user_mode(regs))
15592 *(unsigned long *)mem = __KERNEL_DS;
15593 break;
15594 case GDB_SP:
15595- if (!user_mode_vm(regs))
15596+ if (!user_mode(regs))
15597 *(unsigned long *)mem = kernel_stack_pointer(regs);
15598 break;
15599 case GDB_GS:
15600@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15601 case 'k':
15602 /* clear the trace bit */
15603 linux_regs->flags &= ~X86_EFLAGS_TF;
15604- atomic_set(&kgdb_cpu_doing_single_step, -1);
15605+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15606
15607 /* set the trace bit if we're stepping */
15608 if (remcomInBuffer[0] == 's') {
15609 linux_regs->flags |= X86_EFLAGS_TF;
15610- atomic_set(&kgdb_cpu_doing_single_step,
15611+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15612 raw_smp_processor_id());
15613 }
15614
15615@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15616
15617 switch (cmd) {
15618 case DIE_DEBUG:
15619- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15620+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15621 if (user_mode(regs))
15622 return single_step_cont(regs, args);
15623 break;
15624diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15625index 7da647d..5d3c4c1 100644
15626--- a/arch/x86/kernel/kprobes.c
15627+++ b/arch/x86/kernel/kprobes.c
15628@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15629 } __attribute__((packed)) *insn;
15630
15631 insn = (struct __arch_relative_insn *)from;
15632+
15633+ pax_open_kernel();
15634 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15635 insn->op = op;
15636+ pax_close_kernel();
15637 }
15638
15639 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15640@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15641 kprobe_opcode_t opcode;
15642 kprobe_opcode_t *orig_opcodes = opcodes;
15643
15644- if (search_exception_tables((unsigned long)opcodes))
15645+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15646 return 0; /* Page fault may occur on this address. */
15647
15648 retry:
15649@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15650 }
15651 }
15652 insn_get_length(&insn);
15653+ pax_open_kernel();
15654 memcpy(dest, insn.kaddr, insn.length);
15655+ pax_close_kernel();
15656
15657 #ifdef CONFIG_X86_64
15658 if (insn_rip_relative(&insn)) {
15659@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15660 (u8 *) dest;
15661 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15662 disp = (u8 *) dest + insn_offset_displacement(&insn);
15663+ pax_open_kernel();
15664 *(s32 *) disp = (s32) newdisp;
15665+ pax_close_kernel();
15666 }
15667 #endif
15668 return insn.length;
15669@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15670 */
15671 __copy_instruction(p->ainsn.insn, p->addr, 0);
15672
15673- if (can_boost(p->addr))
15674+ if (can_boost(ktla_ktva(p->addr)))
15675 p->ainsn.boostable = 0;
15676 else
15677 p->ainsn.boostable = -1;
15678
15679- p->opcode = *p->addr;
15680+ p->opcode = *(ktla_ktva(p->addr));
15681 }
15682
15683 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15684@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15685 * nor set current_kprobe, because it doesn't use single
15686 * stepping.
15687 */
15688- regs->ip = (unsigned long)p->ainsn.insn;
15689+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15690 preempt_enable_no_resched();
15691 return;
15692 }
15693@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15694 if (p->opcode == BREAKPOINT_INSTRUCTION)
15695 regs->ip = (unsigned long)p->addr;
15696 else
15697- regs->ip = (unsigned long)p->ainsn.insn;
15698+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15699 }
15700
15701 /*
15702@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15703 setup_singlestep(p, regs, kcb, 0);
15704 return 1;
15705 }
15706- } else if (*addr != BREAKPOINT_INSTRUCTION) {
15707+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15708 /*
15709 * The breakpoint instruction was removed right
15710 * after we hit it. Another cpu has removed
15711@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15712 " movq %rax, 152(%rsp)\n"
15713 RESTORE_REGS_STRING
15714 " popfq\n"
15715+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15716+ " btsq $63,(%rsp)\n"
15717+#endif
15718 #else
15719 " pushf\n"
15720 SAVE_REGS_STRING
15721@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15722 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15723 {
15724 unsigned long *tos = stack_addr(regs);
15725- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15726+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15727 unsigned long orig_ip = (unsigned long)p->addr;
15728 kprobe_opcode_t *insn = p->ainsn.insn;
15729
15730@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15731 struct die_args *args = data;
15732 int ret = NOTIFY_DONE;
15733
15734- if (args->regs && user_mode_vm(args->regs))
15735+ if (args->regs && user_mode(args->regs))
15736 return ret;
15737
15738 switch (val) {
15739@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15740 * Verify if the address gap is in 2GB range, because this uses
15741 * a relative jump.
15742 */
15743- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15744+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15745 if (abs(rel) > 0x7fffffff)
15746 return -ERANGE;
15747
15748@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15749 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15750
15751 /* Set probe function call */
15752- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15753+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15754
15755 /* Set returning jmp instruction at the tail of out-of-line buffer */
15756 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15757- (u8 *)op->kp.addr + op->optinsn.size);
15758+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15759
15760 flush_icache_range((unsigned long) buf,
15761 (unsigned long) buf + TMPL_END_IDX +
15762@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15763 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15764
15765 /* Backup instructions which will be replaced by jump address */
15766- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15767+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15768 RELATIVE_ADDR_SIZE);
15769
15770 insn_buf[0] = RELATIVEJUMP_OPCODE;
15771diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15772index a9c2116..a52d4fc 100644
15773--- a/arch/x86/kernel/kvm.c
15774+++ b/arch/x86/kernel/kvm.c
15775@@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15776 pv_mmu_ops.set_pud = kvm_set_pud;
15777 #if PAGETABLE_LEVELS == 4
15778 pv_mmu_ops.set_pgd = kvm_set_pgd;
15779+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15780 #endif
15781 #endif
15782 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15783diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15784index ea69726..604d066 100644
15785--- a/arch/x86/kernel/ldt.c
15786+++ b/arch/x86/kernel/ldt.c
15787@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15788 if (reload) {
15789 #ifdef CONFIG_SMP
15790 preempt_disable();
15791- load_LDT(pc);
15792+ load_LDT_nolock(pc);
15793 if (!cpumask_equal(mm_cpumask(current->mm),
15794 cpumask_of(smp_processor_id())))
15795 smp_call_function(flush_ldt, current->mm, 1);
15796 preempt_enable();
15797 #else
15798- load_LDT(pc);
15799+ load_LDT_nolock(pc);
15800 #endif
15801 }
15802 if (oldsize) {
15803@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15804 return err;
15805
15806 for (i = 0; i < old->size; i++)
15807- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15808+ write_ldt_entry(new->ldt, i, old->ldt + i);
15809 return 0;
15810 }
15811
15812@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15813 retval = copy_ldt(&mm->context, &old_mm->context);
15814 mutex_unlock(&old_mm->context.lock);
15815 }
15816+
15817+ if (tsk == current) {
15818+ mm->context.vdso = 0;
15819+
15820+#ifdef CONFIG_X86_32
15821+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15822+ mm->context.user_cs_base = 0UL;
15823+ mm->context.user_cs_limit = ~0UL;
15824+
15825+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15826+ cpus_clear(mm->context.cpu_user_cs_mask);
15827+#endif
15828+
15829+#endif
15830+#endif
15831+
15832+ }
15833+
15834 return retval;
15835 }
15836
15837@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15838 }
15839 }
15840
15841+#ifdef CONFIG_PAX_SEGMEXEC
15842+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15843+ error = -EINVAL;
15844+ goto out_unlock;
15845+ }
15846+#endif
15847+
15848 fill_ldt(&ldt, &ldt_info);
15849 if (oldmode)
15850 ldt.avl = 0;
15851diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15852index a3fa43b..8966f4c 100644
15853--- a/arch/x86/kernel/machine_kexec_32.c
15854+++ b/arch/x86/kernel/machine_kexec_32.c
15855@@ -27,7 +27,7 @@
15856 #include <asm/cacheflush.h>
15857 #include <asm/debugreg.h>
15858
15859-static void set_idt(void *newidt, __u16 limit)
15860+static void set_idt(struct desc_struct *newidt, __u16 limit)
15861 {
15862 struct desc_ptr curidt;
15863
15864@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15865 }
15866
15867
15868-static void set_gdt(void *newgdt, __u16 limit)
15869+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15870 {
15871 struct desc_ptr curgdt;
15872
15873@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15874 }
15875
15876 control_page = page_address(image->control_code_page);
15877- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15878+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15879
15880 relocate_kernel_ptr = control_page;
15881 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15882diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15883index 3ca42d0..7cff8cc 100644
15884--- a/arch/x86/kernel/microcode_intel.c
15885+++ b/arch/x86/kernel/microcode_intel.c
15886@@ -436,13 +436,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15887
15888 static int get_ucode_user(void *to, const void *from, size_t n)
15889 {
15890- return copy_from_user(to, from, n);
15891+ return copy_from_user(to, (const void __force_user *)from, n);
15892 }
15893
15894 static enum ucode_state
15895 request_microcode_user(int cpu, const void __user *buf, size_t size)
15896 {
15897- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15898+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15899 }
15900
15901 static void microcode_fini_cpu(int cpu)
15902diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15903index 925179f..267ac7a 100644
15904--- a/arch/x86/kernel/module.c
15905+++ b/arch/x86/kernel/module.c
15906@@ -36,15 +36,60 @@
15907 #define DEBUGP(fmt...)
15908 #endif
15909
15910-void *module_alloc(unsigned long size)
15911+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15912 {
15913- if (PAGE_ALIGN(size) > MODULES_LEN)
15914+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
15915 return NULL;
15916 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15917- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15918+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15919 -1, __builtin_return_address(0));
15920 }
15921
15922+void *module_alloc(unsigned long size)
15923+{
15924+
15925+#ifdef CONFIG_PAX_KERNEXEC
15926+ return __module_alloc(size, PAGE_KERNEL);
15927+#else
15928+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15929+#endif
15930+
15931+}
15932+
15933+#ifdef CONFIG_PAX_KERNEXEC
15934+#ifdef CONFIG_X86_32
15935+void *module_alloc_exec(unsigned long size)
15936+{
15937+ struct vm_struct *area;
15938+
15939+ if (size == 0)
15940+ return NULL;
15941+
15942+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15943+ return area ? area->addr : NULL;
15944+}
15945+EXPORT_SYMBOL(module_alloc_exec);
15946+
15947+void module_free_exec(struct module *mod, void *module_region)
15948+{
15949+ vunmap(module_region);
15950+}
15951+EXPORT_SYMBOL(module_free_exec);
15952+#else
15953+void module_free_exec(struct module *mod, void *module_region)
15954+{
15955+ module_free(mod, module_region);
15956+}
15957+EXPORT_SYMBOL(module_free_exec);
15958+
15959+void *module_alloc_exec(unsigned long size)
15960+{
15961+ return __module_alloc(size, PAGE_KERNEL_RX);
15962+}
15963+EXPORT_SYMBOL(module_alloc_exec);
15964+#endif
15965+#endif
15966+
15967 #ifdef CONFIG_X86_32
15968 int apply_relocate(Elf32_Shdr *sechdrs,
15969 const char *strtab,
15970@@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15971 unsigned int i;
15972 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15973 Elf32_Sym *sym;
15974- uint32_t *location;
15975+ uint32_t *plocation, location;
15976
15977 DEBUGP("Applying relocate section %u to %u\n", relsec,
15978 sechdrs[relsec].sh_info);
15979 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15980 /* This is where to make the change */
15981- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15982- + rel[i].r_offset;
15983+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15984+ location = (uint32_t)plocation;
15985+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15986+ plocation = ktla_ktva((void *)plocation);
15987 /* This is the symbol it is referring to. Note that all
15988 undefined symbols have been resolved. */
15989 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15990@@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15991 switch (ELF32_R_TYPE(rel[i].r_info)) {
15992 case R_386_32:
15993 /* We add the value into the location given */
15994- *location += sym->st_value;
15995+ pax_open_kernel();
15996+ *plocation += sym->st_value;
15997+ pax_close_kernel();
15998 break;
15999 case R_386_PC32:
16000 /* Add the value, subtract its postition */
16001- *location += sym->st_value - (uint32_t)location;
16002+ pax_open_kernel();
16003+ *plocation += sym->st_value - location;
16004+ pax_close_kernel();
16005 break;
16006 default:
16007 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
16008@@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
16009 case R_X86_64_NONE:
16010 break;
16011 case R_X86_64_64:
16012+ pax_open_kernel();
16013 *(u64 *)loc = val;
16014+ pax_close_kernel();
16015 break;
16016 case R_X86_64_32:
16017+ pax_open_kernel();
16018 *(u32 *)loc = val;
16019+ pax_close_kernel();
16020 if (val != *(u32 *)loc)
16021 goto overflow;
16022 break;
16023 case R_X86_64_32S:
16024+ pax_open_kernel();
16025 *(s32 *)loc = val;
16026+ pax_close_kernel();
16027 if ((s64)val != *(s32 *)loc)
16028 goto overflow;
16029 break;
16030 case R_X86_64_PC32:
16031 val -= (u64)loc;
16032+ pax_open_kernel();
16033 *(u32 *)loc = val;
16034+ pax_close_kernel();
16035+
16036 #if 0
16037 if ((s64)val != *(s32 *)loc)
16038 goto overflow;
16039diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
16040index e88f37b..1353db6 100644
16041--- a/arch/x86/kernel/nmi.c
16042+++ b/arch/x86/kernel/nmi.c
16043@@ -408,6 +408,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
16044 dotraplinkage notrace __kprobes void
16045 do_nmi(struct pt_regs *regs, long error_code)
16046 {
16047+
16048+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16049+ if (!user_mode(regs)) {
16050+ unsigned long cs = regs->cs & 0xFFFF;
16051+ unsigned long ip = ktva_ktla(regs->ip);
16052+
16053+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
16054+ regs->ip = ip;
16055+ }
16056+#endif
16057+
16058 nmi_enter();
16059
16060 inc_irq_stat(__nmi_count);
16061diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
16062index 676b8c7..870ba04 100644
16063--- a/arch/x86/kernel/paravirt-spinlocks.c
16064+++ b/arch/x86/kernel/paravirt-spinlocks.c
16065@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
16066 arch_spin_lock(lock);
16067 }
16068
16069-struct pv_lock_ops pv_lock_ops = {
16070+struct pv_lock_ops pv_lock_ops __read_only = {
16071 #ifdef CONFIG_SMP
16072 .spin_is_locked = __ticket_spin_is_locked,
16073 .spin_is_contended = __ticket_spin_is_contended,
16074diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
16075index d90272e..6bb013b 100644
16076--- a/arch/x86/kernel/paravirt.c
16077+++ b/arch/x86/kernel/paravirt.c
16078@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
16079 {
16080 return x;
16081 }
16082+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16083+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
16084+#endif
16085
16086 void __init default_banner(void)
16087 {
16088@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16089 if (opfunc == NULL)
16090 /* If there's no function, patch it with a ud2a (BUG) */
16091 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16092- else if (opfunc == _paravirt_nop)
16093+ else if (opfunc == (void *)_paravirt_nop)
16094 /* If the operation is a nop, then nop the callsite */
16095 ret = paravirt_patch_nop();
16096
16097 /* identity functions just return their single argument */
16098- else if (opfunc == _paravirt_ident_32)
16099+ else if (opfunc == (void *)_paravirt_ident_32)
16100 ret = paravirt_patch_ident_32(insnbuf, len);
16101- else if (opfunc == _paravirt_ident_64)
16102+ else if (opfunc == (void *)_paravirt_ident_64)
16103 ret = paravirt_patch_ident_64(insnbuf, len);
16104+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16105+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16106+ ret = paravirt_patch_ident_64(insnbuf, len);
16107+#endif
16108
16109 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16110 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16111@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16112 if (insn_len > len || start == NULL)
16113 insn_len = len;
16114 else
16115- memcpy(insnbuf, start, insn_len);
16116+ memcpy(insnbuf, ktla_ktva(start), insn_len);
16117
16118 return insn_len;
16119 }
16120@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
16121 preempt_enable();
16122 }
16123
16124-struct pv_info pv_info = {
16125+struct pv_info pv_info __read_only = {
16126 .name = "bare hardware",
16127 .paravirt_enabled = 0,
16128 .kernel_rpl = 0,
16129@@ -313,16 +320,16 @@ struct pv_info pv_info = {
16130 #endif
16131 };
16132
16133-struct pv_init_ops pv_init_ops = {
16134+struct pv_init_ops pv_init_ops __read_only = {
16135 .patch = native_patch,
16136 };
16137
16138-struct pv_time_ops pv_time_ops = {
16139+struct pv_time_ops pv_time_ops __read_only = {
16140 .sched_clock = native_sched_clock,
16141 .steal_clock = native_steal_clock,
16142 };
16143
16144-struct pv_irq_ops pv_irq_ops = {
16145+struct pv_irq_ops pv_irq_ops __read_only = {
16146 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16147 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16148 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16149@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
16150 #endif
16151 };
16152
16153-struct pv_cpu_ops pv_cpu_ops = {
16154+struct pv_cpu_ops pv_cpu_ops __read_only = {
16155 .cpuid = native_cpuid,
16156 .get_debugreg = native_get_debugreg,
16157 .set_debugreg = native_set_debugreg,
16158@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16159 .end_context_switch = paravirt_nop,
16160 };
16161
16162-struct pv_apic_ops pv_apic_ops = {
16163+struct pv_apic_ops pv_apic_ops __read_only = {
16164 #ifdef CONFIG_X86_LOCAL_APIC
16165 .startup_ipi_hook = paravirt_nop,
16166 #endif
16167 };
16168
16169-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16170+#ifdef CONFIG_X86_32
16171+#ifdef CONFIG_X86_PAE
16172+/* 64-bit pagetable entries */
16173+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16174+#else
16175 /* 32-bit pagetable entries */
16176 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16177+#endif
16178 #else
16179 /* 64-bit pagetable entries */
16180 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16181 #endif
16182
16183-struct pv_mmu_ops pv_mmu_ops = {
16184+struct pv_mmu_ops pv_mmu_ops __read_only = {
16185
16186 .read_cr2 = native_read_cr2,
16187 .write_cr2 = native_write_cr2,
16188@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16189 .make_pud = PTE_IDENT,
16190
16191 .set_pgd = native_set_pgd,
16192+ .set_pgd_batched = native_set_pgd_batched,
16193 #endif
16194 #endif /* PAGETABLE_LEVELS >= 3 */
16195
16196@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16197 },
16198
16199 .set_fixmap = native_set_fixmap,
16200+
16201+#ifdef CONFIG_PAX_KERNEXEC
16202+ .pax_open_kernel = native_pax_open_kernel,
16203+ .pax_close_kernel = native_pax_close_kernel,
16204+#endif
16205+
16206 };
16207
16208 EXPORT_SYMBOL_GPL(pv_time_ops);
16209diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16210index 35ccf75..7a15747 100644
16211--- a/arch/x86/kernel/pci-iommu_table.c
16212+++ b/arch/x86/kernel/pci-iommu_table.c
16213@@ -2,7 +2,7 @@
16214 #include <asm/iommu_table.h>
16215 #include <linux/string.h>
16216 #include <linux/kallsyms.h>
16217-
16218+#include <linux/sched.h>
16219
16220 #define DEBUG 1
16221
16222diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16223index ee5d4fb..426649b 100644
16224--- a/arch/x86/kernel/process.c
16225+++ b/arch/x86/kernel/process.c
16226@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16227
16228 void free_thread_info(struct thread_info *ti)
16229 {
16230- free_thread_xstate(ti->task);
16231 free_pages((unsigned long)ti, THREAD_ORDER);
16232 }
16233
16234+static struct kmem_cache *task_struct_cachep;
16235+
16236 void arch_task_cache_init(void)
16237 {
16238- task_xstate_cachep =
16239- kmem_cache_create("task_xstate", xstate_size,
16240+ /* create a slab on which task_structs can be allocated */
16241+ task_struct_cachep =
16242+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16243+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16244+
16245+ task_xstate_cachep =
16246+ kmem_cache_create("task_xstate", xstate_size,
16247 __alignof__(union thread_xstate),
16248- SLAB_PANIC | SLAB_NOTRACK, NULL);
16249+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16250+}
16251+
16252+struct task_struct *alloc_task_struct_node(int node)
16253+{
16254+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16255+}
16256+
16257+void free_task_struct(struct task_struct *task)
16258+{
16259+ free_thread_xstate(task);
16260+ kmem_cache_free(task_struct_cachep, task);
16261 }
16262
16263 /*
16264@@ -70,7 +87,7 @@ void exit_thread(void)
16265 unsigned long *bp = t->io_bitmap_ptr;
16266
16267 if (bp) {
16268- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16269+ struct tss_struct *tss = init_tss + get_cpu();
16270
16271 t->io_bitmap_ptr = NULL;
16272 clear_thread_flag(TIF_IO_BITMAP);
16273@@ -106,7 +123,7 @@ void show_regs_common(void)
16274
16275 printk(KERN_CONT "\n");
16276 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16277- current->pid, current->comm, print_tainted(),
16278+ task_pid_nr(current), current->comm, print_tainted(),
16279 init_utsname()->release,
16280 (int)strcspn(init_utsname()->version, " "),
16281 init_utsname()->version);
16282@@ -120,6 +137,9 @@ void flush_thread(void)
16283 {
16284 struct task_struct *tsk = current;
16285
16286+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16287+ loadsegment(gs, 0);
16288+#endif
16289 flush_ptrace_hw_breakpoint(tsk);
16290 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16291 /*
16292@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16293 regs.di = (unsigned long) arg;
16294
16295 #ifdef CONFIG_X86_32
16296- regs.ds = __USER_DS;
16297- regs.es = __USER_DS;
16298+ regs.ds = __KERNEL_DS;
16299+ regs.es = __KERNEL_DS;
16300 regs.fs = __KERNEL_PERCPU;
16301- regs.gs = __KERNEL_STACK_CANARY;
16302+ savesegment(gs, regs.gs);
16303 #else
16304 regs.ss = __KERNEL_DS;
16305 #endif
16306@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16307
16308 return ret;
16309 }
16310-void stop_this_cpu(void *dummy)
16311+__noreturn void stop_this_cpu(void *dummy)
16312 {
16313 local_irq_disable();
16314 /*
16315@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16316 }
16317 early_param("idle", idle_setup);
16318
16319-unsigned long arch_align_stack(unsigned long sp)
16320+#ifdef CONFIG_PAX_RANDKSTACK
16321+void pax_randomize_kstack(struct pt_regs *regs)
16322 {
16323- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16324- sp -= get_random_int() % 8192;
16325- return sp & ~0xf;
16326-}
16327+ struct thread_struct *thread = &current->thread;
16328+ unsigned long time;
16329
16330-unsigned long arch_randomize_brk(struct mm_struct *mm)
16331-{
16332- unsigned long range_end = mm->brk + 0x02000000;
16333- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16334-}
16335+ if (!randomize_va_space)
16336+ return;
16337+
16338+ if (v8086_mode(regs))
16339+ return;
16340
16341+ rdtscl(time);
16342+
16343+ /* P4 seems to return a 0 LSB, ignore it */
16344+#ifdef CONFIG_MPENTIUM4
16345+ time &= 0x3EUL;
16346+ time <<= 2;
16347+#elif defined(CONFIG_X86_64)
16348+ time &= 0xFUL;
16349+ time <<= 4;
16350+#else
16351+ time &= 0x1FUL;
16352+ time <<= 3;
16353+#endif
16354+
16355+ thread->sp0 ^= time;
16356+ load_sp0(init_tss + smp_processor_id(), thread);
16357+
16358+#ifdef CONFIG_X86_64
16359+ percpu_write(kernel_stack, thread->sp0);
16360+#endif
16361+}
16362+#endif
16363diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16364index 795b79f..063767a 100644
16365--- a/arch/x86/kernel/process_32.c
16366+++ b/arch/x86/kernel/process_32.c
16367@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16368 unsigned long thread_saved_pc(struct task_struct *tsk)
16369 {
16370 return ((unsigned long *)tsk->thread.sp)[3];
16371+//XXX return tsk->thread.eip;
16372 }
16373
16374 #ifndef CONFIG_SMP
16375@@ -130,15 +131,14 @@ void __show_regs(struct pt_regs *regs, int all)
16376 unsigned long sp;
16377 unsigned short ss, gs;
16378
16379- if (user_mode_vm(regs)) {
16380+ if (user_mode(regs)) {
16381 sp = regs->sp;
16382 ss = regs->ss & 0xffff;
16383- gs = get_user_gs(regs);
16384 } else {
16385 sp = kernel_stack_pointer(regs);
16386 savesegment(ss, ss);
16387- savesegment(gs, gs);
16388 }
16389+ gs = get_user_gs(regs);
16390
16391 show_regs_common();
16392
16393@@ -200,13 +200,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16394 struct task_struct *tsk;
16395 int err;
16396
16397- childregs = task_pt_regs(p);
16398+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16399 *childregs = *regs;
16400 childregs->ax = 0;
16401 childregs->sp = sp;
16402
16403 p->thread.sp = (unsigned long) childregs;
16404 p->thread.sp0 = (unsigned long) (childregs+1);
16405+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16406
16407 p->thread.ip = (unsigned long) ret_from_fork;
16408
16409@@ -296,7 +297,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16410 struct thread_struct *prev = &prev_p->thread,
16411 *next = &next_p->thread;
16412 int cpu = smp_processor_id();
16413- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16414+ struct tss_struct *tss = init_tss + cpu;
16415 bool preload_fpu;
16416
16417 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16418@@ -331,6 +332,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16419 */
16420 lazy_save_gs(prev->gs);
16421
16422+#ifdef CONFIG_PAX_MEMORY_UDEREF
16423+ __set_fs(task_thread_info(next_p)->addr_limit);
16424+#endif
16425+
16426 /*
16427 * Load the per-thread Thread-Local Storage descriptor.
16428 */
16429@@ -366,6 +371,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16430 */
16431 arch_end_context_switch(next_p);
16432
16433+ percpu_write(current_task, next_p);
16434+ percpu_write(current_tinfo, &next_p->tinfo);
16435+
16436 if (preload_fpu)
16437 __math_state_restore();
16438
16439@@ -375,8 +383,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16440 if (prev->gs | next->gs)
16441 lazy_load_gs(next->gs);
16442
16443- percpu_write(current_task, next_p);
16444-
16445 return prev_p;
16446 }
16447
16448@@ -406,4 +412,3 @@ unsigned long get_wchan(struct task_struct *p)
16449 } while (count++ < 16);
16450 return 0;
16451 }
16452-
16453diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16454index 3bd7e6e..90b2bcf 100644
16455--- a/arch/x86/kernel/process_64.c
16456+++ b/arch/x86/kernel/process_64.c
16457@@ -89,7 +89,7 @@ static void __exit_idle(void)
16458 void exit_idle(void)
16459 {
16460 /* idle loop has pid 0 */
16461- if (current->pid)
16462+ if (task_pid_nr(current))
16463 return;
16464 __exit_idle();
16465 }
16466@@ -264,8 +264,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16467 struct pt_regs *childregs;
16468 struct task_struct *me = current;
16469
16470- childregs = ((struct pt_regs *)
16471- (THREAD_SIZE + task_stack_page(p))) - 1;
16472+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16473 *childregs = *regs;
16474
16475 childregs->ax = 0;
16476@@ -277,6 +276,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16477 p->thread.sp = (unsigned long) childregs;
16478 p->thread.sp0 = (unsigned long) (childregs+1);
16479 p->thread.usersp = me->thread.usersp;
16480+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16481
16482 set_tsk_thread_flag(p, TIF_FORK);
16483
16484@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16485 struct thread_struct *prev = &prev_p->thread;
16486 struct thread_struct *next = &next_p->thread;
16487 int cpu = smp_processor_id();
16488- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16489+ struct tss_struct *tss = init_tss + cpu;
16490 unsigned fsindex, gsindex;
16491 bool preload_fpu;
16492
16493@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16494 prev->usersp = percpu_read(old_rsp);
16495 percpu_write(old_rsp, next->usersp);
16496 percpu_write(current_task, next_p);
16497+ percpu_write(current_tinfo, &next_p->tinfo);
16498
16499- percpu_write(kernel_stack,
16500- (unsigned long)task_stack_page(next_p) +
16501- THREAD_SIZE - KERNEL_STACK_OFFSET);
16502+ percpu_write(kernel_stack, next->sp0);
16503
16504 /*
16505 * Now maybe reload the debug registers and handle I/O bitmaps
16506@@ -540,12 +539,11 @@ unsigned long get_wchan(struct task_struct *p)
16507 if (!p || p == current || p->state == TASK_RUNNING)
16508 return 0;
16509 stack = (unsigned long)task_stack_page(p);
16510- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16511+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16512 return 0;
16513 fp = *(u64 *)(p->thread.sp);
16514 do {
16515- if (fp < (unsigned long)stack ||
16516- fp >= (unsigned long)stack+THREAD_SIZE)
16517+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16518 return 0;
16519 ip = *(u64 *)(fp+8);
16520 if (!in_sched_functions(ip))
16521diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16522index 8252879..d3219e0 100644
16523--- a/arch/x86/kernel/ptrace.c
16524+++ b/arch/x86/kernel/ptrace.c
16525@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16526 unsigned long addr, unsigned long data)
16527 {
16528 int ret;
16529- unsigned long __user *datap = (unsigned long __user *)data;
16530+ unsigned long __user *datap = (__force unsigned long __user *)data;
16531
16532 switch (request) {
16533 /* read the word at location addr in the USER area. */
16534@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16535 if ((int) addr < 0)
16536 return -EIO;
16537 ret = do_get_thread_area(child, addr,
16538- (struct user_desc __user *)data);
16539+ (__force struct user_desc __user *) data);
16540 break;
16541
16542 case PTRACE_SET_THREAD_AREA:
16543 if ((int) addr < 0)
16544 return -EIO;
16545 ret = do_set_thread_area(child, addr,
16546- (struct user_desc __user *)data, 0);
16547+ (__force struct user_desc __user *) data, 0);
16548 break;
16549 #endif
16550
16551@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16552 memset(info, 0, sizeof(*info));
16553 info->si_signo = SIGTRAP;
16554 info->si_code = si_code;
16555- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16556+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16557 }
16558
16559 void user_single_step_siginfo(struct task_struct *tsk,
16560diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16561index 42eb330..139955c 100644
16562--- a/arch/x86/kernel/pvclock.c
16563+++ b/arch/x86/kernel/pvclock.c
16564@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16565 return pv_tsc_khz;
16566 }
16567
16568-static atomic64_t last_value = ATOMIC64_INIT(0);
16569+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16570
16571 void pvclock_resume(void)
16572 {
16573- atomic64_set(&last_value, 0);
16574+ atomic64_set_unchecked(&last_value, 0);
16575 }
16576
16577 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16578@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16579 * updating at the same time, and one of them could be slightly behind,
16580 * making the assumption that last_value always go forward fail to hold.
16581 */
16582- last = atomic64_read(&last_value);
16583+ last = atomic64_read_unchecked(&last_value);
16584 do {
16585 if (ret < last)
16586 return last;
16587- last = atomic64_cmpxchg(&last_value, last, ret);
16588+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16589 } while (unlikely(last != ret));
16590
16591 return ret;
16592diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16593index 37a458b..e63d183 100644
16594--- a/arch/x86/kernel/reboot.c
16595+++ b/arch/x86/kernel/reboot.c
16596@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16597 EXPORT_SYMBOL(pm_power_off);
16598
16599 static const struct desc_ptr no_idt = {};
16600-static int reboot_mode;
16601+static unsigned short reboot_mode;
16602 enum reboot_type reboot_type = BOOT_ACPI;
16603 int reboot_force;
16604
16605@@ -324,13 +324,17 @@ core_initcall(reboot_init);
16606 extern const unsigned char machine_real_restart_asm[];
16607 extern const u64 machine_real_restart_gdt[3];
16608
16609-void machine_real_restart(unsigned int type)
16610+__noreturn void machine_real_restart(unsigned int type)
16611 {
16612 void *restart_va;
16613 unsigned long restart_pa;
16614- void (*restart_lowmem)(unsigned int);
16615+ void (* __noreturn restart_lowmem)(unsigned int);
16616 u64 *lowmem_gdt;
16617
16618+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16619+ struct desc_struct *gdt;
16620+#endif
16621+
16622 local_irq_disable();
16623
16624 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16625@@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16626 boot)". This seems like a fairly standard thing that gets set by
16627 REBOOT.COM programs, and the previous reset routine did this
16628 too. */
16629- *((unsigned short *)0x472) = reboot_mode;
16630+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16631
16632 /* Patch the GDT in the low memory trampoline */
16633 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16634
16635 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16636 restart_pa = virt_to_phys(restart_va);
16637- restart_lowmem = (void (*)(unsigned int))restart_pa;
16638+ restart_lowmem = (void *)restart_pa;
16639
16640 /* GDT[0]: GDT self-pointer */
16641 lowmem_gdt[0] =
16642@@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16643 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16644
16645 /* Jump to the identity-mapped low memory code */
16646+
16647+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16648+ gdt = get_cpu_gdt_table(smp_processor_id());
16649+ pax_open_kernel();
16650+#ifdef CONFIG_PAX_MEMORY_UDEREF
16651+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16652+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16653+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16654+#endif
16655+#ifdef CONFIG_PAX_KERNEXEC
16656+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16657+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16658+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16659+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16660+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16661+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16662+#endif
16663+ pax_close_kernel();
16664+#endif
16665+
16666+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16667+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16668+ unreachable();
16669+#else
16670 restart_lowmem(type);
16671+#endif
16672+
16673 }
16674 #ifdef CONFIG_APM_MODULE
16675 EXPORT_SYMBOL(machine_real_restart);
16676@@ -540,7 +570,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16677 * try to force a triple fault and then cycle between hitting the keyboard
16678 * controller and doing that
16679 */
16680-static void native_machine_emergency_restart(void)
16681+__noreturn static void native_machine_emergency_restart(void)
16682 {
16683 int i;
16684 int attempt = 0;
16685@@ -664,13 +694,13 @@ void native_machine_shutdown(void)
16686 #endif
16687 }
16688
16689-static void __machine_emergency_restart(int emergency)
16690+static __noreturn void __machine_emergency_restart(int emergency)
16691 {
16692 reboot_emergency = emergency;
16693 machine_ops.emergency_restart();
16694 }
16695
16696-static void native_machine_restart(char *__unused)
16697+static __noreturn void native_machine_restart(char *__unused)
16698 {
16699 printk("machine restart\n");
16700
16701@@ -679,7 +709,7 @@ static void native_machine_restart(char *__unused)
16702 __machine_emergency_restart(0);
16703 }
16704
16705-static void native_machine_halt(void)
16706+static __noreturn void native_machine_halt(void)
16707 {
16708 /* stop other cpus and apics */
16709 machine_shutdown();
16710@@ -690,7 +720,7 @@ static void native_machine_halt(void)
16711 stop_this_cpu(NULL);
16712 }
16713
16714-static void native_machine_power_off(void)
16715+__noreturn static void native_machine_power_off(void)
16716 {
16717 if (pm_power_off) {
16718 if (!reboot_force)
16719@@ -699,6 +729,7 @@ static void native_machine_power_off(void)
16720 }
16721 /* a fallback in case there is no PM info available */
16722 tboot_shutdown(TB_SHUTDOWN_HALT);
16723+ unreachable();
16724 }
16725
16726 struct machine_ops machine_ops = {
16727diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16728index 7a6f3b3..bed145d7 100644
16729--- a/arch/x86/kernel/relocate_kernel_64.S
16730+++ b/arch/x86/kernel/relocate_kernel_64.S
16731@@ -11,6 +11,7 @@
16732 #include <asm/kexec.h>
16733 #include <asm/processor-flags.h>
16734 #include <asm/pgtable_types.h>
16735+#include <asm/alternative-asm.h>
16736
16737 /*
16738 * Must be relocatable PIC code callable as a C function
16739@@ -160,13 +161,14 @@ identity_mapped:
16740 xorq %rbp, %rbp
16741 xorq %r8, %r8
16742 xorq %r9, %r9
16743- xorq %r10, %r9
16744+ xorq %r10, %r10
16745 xorq %r11, %r11
16746 xorq %r12, %r12
16747 xorq %r13, %r13
16748 xorq %r14, %r14
16749 xorq %r15, %r15
16750
16751+ pax_force_retaddr 0, 1
16752 ret
16753
16754 1:
16755diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16756index cf0ef98..e3f780b 100644
16757--- a/arch/x86/kernel/setup.c
16758+++ b/arch/x86/kernel/setup.c
16759@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16760
16761 switch (data->type) {
16762 case SETUP_E820_EXT:
16763- parse_e820_ext(data);
16764+ parse_e820_ext((struct setup_data __force_kernel *)data);
16765 break;
16766 case SETUP_DTB:
16767 add_dtb(pa_data);
16768@@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16769 * area (640->1Mb) as ram even though it is not.
16770 * take them out.
16771 */
16772- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16773+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16774 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16775 }
16776
16777@@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16778
16779 if (!boot_params.hdr.root_flags)
16780 root_mountflags &= ~MS_RDONLY;
16781- init_mm.start_code = (unsigned long) _text;
16782- init_mm.end_code = (unsigned long) _etext;
16783+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16784+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16785 init_mm.end_data = (unsigned long) _edata;
16786 init_mm.brk = _brk_end;
16787
16788- code_resource.start = virt_to_phys(_text);
16789- code_resource.end = virt_to_phys(_etext)-1;
16790- data_resource.start = virt_to_phys(_etext);
16791+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16792+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16793+ data_resource.start = virt_to_phys(_sdata);
16794 data_resource.end = virt_to_phys(_edata)-1;
16795 bss_resource.start = virt_to_phys(&__bss_start);
16796 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16797diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16798index 71f4727..16dc9f7 100644
16799--- a/arch/x86/kernel/setup_percpu.c
16800+++ b/arch/x86/kernel/setup_percpu.c
16801@@ -21,19 +21,17 @@
16802 #include <asm/cpu.h>
16803 #include <asm/stackprotector.h>
16804
16805-DEFINE_PER_CPU(int, cpu_number);
16806+#ifdef CONFIG_SMP
16807+DEFINE_PER_CPU(unsigned int, cpu_number);
16808 EXPORT_PER_CPU_SYMBOL(cpu_number);
16809+#endif
16810
16811-#ifdef CONFIG_X86_64
16812 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16813-#else
16814-#define BOOT_PERCPU_OFFSET 0
16815-#endif
16816
16817 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16818 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16819
16820-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16821+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16822 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16823 };
16824 EXPORT_SYMBOL(__per_cpu_offset);
16825@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16826 {
16827 #ifdef CONFIG_X86_32
16828 struct desc_struct gdt;
16829+ unsigned long base = per_cpu_offset(cpu);
16830
16831- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16832- 0x2 | DESCTYPE_S, 0x8);
16833- gdt.s = 1;
16834+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16835+ 0x83 | DESCTYPE_S, 0xC);
16836 write_gdt_entry(get_cpu_gdt_table(cpu),
16837 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16838 #endif
16839@@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16840 /* alrighty, percpu areas up and running */
16841 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16842 for_each_possible_cpu(cpu) {
16843+#ifdef CONFIG_CC_STACKPROTECTOR
16844+#ifdef CONFIG_X86_32
16845+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16846+#endif
16847+#endif
16848 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16849 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16850 per_cpu(cpu_number, cpu) = cpu;
16851@@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16852 */
16853 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16854 #endif
16855+#ifdef CONFIG_CC_STACKPROTECTOR
16856+#ifdef CONFIG_X86_32
16857+ if (!cpu)
16858+ per_cpu(stack_canary.canary, cpu) = canary;
16859+#endif
16860+#endif
16861 /*
16862 * Up to this point, the boot CPU has been using .init.data
16863 * area. Reload any changed state for the boot CPU.
16864diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16865index 54ddaeb2..22c3bdc 100644
16866--- a/arch/x86/kernel/signal.c
16867+++ b/arch/x86/kernel/signal.c
16868@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16869 * Align the stack pointer according to the i386 ABI,
16870 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16871 */
16872- sp = ((sp + 4) & -16ul) - 4;
16873+ sp = ((sp - 12) & -16ul) - 4;
16874 #else /* !CONFIG_X86_32 */
16875 sp = round_down(sp, 16) - 8;
16876 #endif
16877@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16878 * Return an always-bogus address instead so we will die with SIGSEGV.
16879 */
16880 if (onsigstack && !likely(on_sig_stack(sp)))
16881- return (void __user *)-1L;
16882+ return (__force void __user *)-1L;
16883
16884 /* save i387 state */
16885 if (used_math() && save_i387_xstate(*fpstate) < 0)
16886- return (void __user *)-1L;
16887+ return (__force void __user *)-1L;
16888
16889 return (void __user *)sp;
16890 }
16891@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16892 }
16893
16894 if (current->mm->context.vdso)
16895- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16896+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16897 else
16898- restorer = &frame->retcode;
16899+ restorer = (void __user *)&frame->retcode;
16900 if (ka->sa.sa_flags & SA_RESTORER)
16901 restorer = ka->sa.sa_restorer;
16902
16903@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16904 * reasons and because gdb uses it as a signature to notice
16905 * signal handler stack frames.
16906 */
16907- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16908+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16909
16910 if (err)
16911 return -EFAULT;
16912@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16913 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16914
16915 /* Set up to return from userspace. */
16916- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16917+ if (current->mm->context.vdso)
16918+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16919+ else
16920+ restorer = (void __user *)&frame->retcode;
16921 if (ka->sa.sa_flags & SA_RESTORER)
16922 restorer = ka->sa.sa_restorer;
16923 put_user_ex(restorer, &frame->pretcode);
16924@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16925 * reasons and because gdb uses it as a signature to notice
16926 * signal handler stack frames.
16927 */
16928- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16929+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16930 } put_user_catch(err);
16931
16932 if (err)
16933@@ -769,7 +772,7 @@ static void do_signal(struct pt_regs *regs)
16934 * X86_32: vm86 regs switched out by assembly code before reaching
16935 * here, so testing against kernel CS suffices.
16936 */
16937- if (!user_mode(regs))
16938+ if (!user_mode_novm(regs))
16939 return;
16940
16941 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16942diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16943index 9f548cb..caf76f7 100644
16944--- a/arch/x86/kernel/smpboot.c
16945+++ b/arch/x86/kernel/smpboot.c
16946@@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16947 set_idle_for_cpu(cpu, c_idle.idle);
16948 do_rest:
16949 per_cpu(current_task, cpu) = c_idle.idle;
16950+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16951 #ifdef CONFIG_X86_32
16952 /* Stack for startup_32 can be just as for start_secondary onwards */
16953 irq_ctx_init(cpu);
16954 #else
16955 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16956 initial_gs = per_cpu_offset(cpu);
16957- per_cpu(kernel_stack, cpu) =
16958- (unsigned long)task_stack_page(c_idle.idle) -
16959- KERNEL_STACK_OFFSET + THREAD_SIZE;
16960+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16961 #endif
16962+
16963+ pax_open_kernel();
16964 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16965+ pax_close_kernel();
16966+
16967 initial_code = (unsigned long)start_secondary;
16968 stack_start = c_idle.idle->thread.sp;
16969
16970@@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16971
16972 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16973
16974+#ifdef CONFIG_PAX_PER_CPU_PGD
16975+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16976+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16977+ KERNEL_PGD_PTRS);
16978+#endif
16979+
16980 err = do_boot_cpu(apicid, cpu);
16981 if (err) {
16982 pr_debug("do_boot_cpu failed %d\n", err);
16983diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16984index c346d11..d43b163 100644
16985--- a/arch/x86/kernel/step.c
16986+++ b/arch/x86/kernel/step.c
16987@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16988 struct desc_struct *desc;
16989 unsigned long base;
16990
16991- seg &= ~7UL;
16992+ seg >>= 3;
16993
16994 mutex_lock(&child->mm->context.lock);
16995- if (unlikely((seg >> 3) >= child->mm->context.size))
16996+ if (unlikely(seg >= child->mm->context.size))
16997 addr = -1L; /* bogus selector, access would fault */
16998 else {
16999 desc = child->mm->context.ldt + seg;
17000@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
17001 addr += base;
17002 }
17003 mutex_unlock(&child->mm->context.lock);
17004- }
17005+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
17006+ addr = ktla_ktva(addr);
17007
17008 return addr;
17009 }
17010@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
17011 unsigned char opcode[15];
17012 unsigned long addr = convert_ip_to_linear(child, regs);
17013
17014+ if (addr == -EINVAL)
17015+ return 0;
17016+
17017 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
17018 for (i = 0; i < copied; i++) {
17019 switch (opcode[i]) {
17020diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
17021index 0b0cb5f..db6b9ed 100644
17022--- a/arch/x86/kernel/sys_i386_32.c
17023+++ b/arch/x86/kernel/sys_i386_32.c
17024@@ -24,17 +24,224 @@
17025
17026 #include <asm/syscalls.h>
17027
17028-/*
17029- * Do a system call from kernel instead of calling sys_execve so we
17030- * end up with proper pt_regs.
17031- */
17032-int kernel_execve(const char *filename,
17033- const char *const argv[],
17034- const char *const envp[])
17035+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
17036 {
17037- long __res;
17038- asm volatile ("int $0x80"
17039- : "=a" (__res)
17040- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17041- return __res;
17042+ unsigned long pax_task_size = TASK_SIZE;
17043+
17044+#ifdef CONFIG_PAX_SEGMEXEC
17045+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
17046+ pax_task_size = SEGMEXEC_TASK_SIZE;
17047+#endif
17048+
17049+ if (len > pax_task_size || addr > pax_task_size - len)
17050+ return -EINVAL;
17051+
17052+ return 0;
17053+}
17054+
17055+unsigned long
17056+arch_get_unmapped_area(struct file *filp, unsigned long addr,
17057+ unsigned long len, unsigned long pgoff, unsigned long flags)
17058+{
17059+ struct mm_struct *mm = current->mm;
17060+ struct vm_area_struct *vma;
17061+ unsigned long start_addr, pax_task_size = TASK_SIZE;
17062+
17063+#ifdef CONFIG_PAX_SEGMEXEC
17064+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17065+ pax_task_size = SEGMEXEC_TASK_SIZE;
17066+#endif
17067+
17068+ pax_task_size -= PAGE_SIZE;
17069+
17070+ if (len > pax_task_size)
17071+ return -ENOMEM;
17072+
17073+ if (flags & MAP_FIXED)
17074+ return addr;
17075+
17076+#ifdef CONFIG_PAX_RANDMMAP
17077+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17078+#endif
17079+
17080+ if (addr) {
17081+ addr = PAGE_ALIGN(addr);
17082+ if (pax_task_size - len >= addr) {
17083+ vma = find_vma(mm, addr);
17084+ if (check_heap_stack_gap(vma, addr, len))
17085+ return addr;
17086+ }
17087+ }
17088+ if (len > mm->cached_hole_size) {
17089+ start_addr = addr = mm->free_area_cache;
17090+ } else {
17091+ start_addr = addr = mm->mmap_base;
17092+ mm->cached_hole_size = 0;
17093+ }
17094+
17095+#ifdef CONFIG_PAX_PAGEEXEC
17096+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17097+ start_addr = 0x00110000UL;
17098+
17099+#ifdef CONFIG_PAX_RANDMMAP
17100+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17101+ start_addr += mm->delta_mmap & 0x03FFF000UL;
17102+#endif
17103+
17104+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17105+ start_addr = addr = mm->mmap_base;
17106+ else
17107+ addr = start_addr;
17108+ }
17109+#endif
17110+
17111+full_search:
17112+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17113+ /* At this point: (!vma || addr < vma->vm_end). */
17114+ if (pax_task_size - len < addr) {
17115+ /*
17116+ * Start a new search - just in case we missed
17117+ * some holes.
17118+ */
17119+ if (start_addr != mm->mmap_base) {
17120+ start_addr = addr = mm->mmap_base;
17121+ mm->cached_hole_size = 0;
17122+ goto full_search;
17123+ }
17124+ return -ENOMEM;
17125+ }
17126+ if (check_heap_stack_gap(vma, addr, len))
17127+ break;
17128+ if (addr + mm->cached_hole_size < vma->vm_start)
17129+ mm->cached_hole_size = vma->vm_start - addr;
17130+ addr = vma->vm_end;
17131+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
17132+ start_addr = addr = mm->mmap_base;
17133+ mm->cached_hole_size = 0;
17134+ goto full_search;
17135+ }
17136+ }
17137+
17138+ /*
17139+ * Remember the place where we stopped the search:
17140+ */
17141+ mm->free_area_cache = addr + len;
17142+ return addr;
17143+}
17144+
17145+unsigned long
17146+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17147+ const unsigned long len, const unsigned long pgoff,
17148+ const unsigned long flags)
17149+{
17150+ struct vm_area_struct *vma;
17151+ struct mm_struct *mm = current->mm;
17152+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17153+
17154+#ifdef CONFIG_PAX_SEGMEXEC
17155+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17156+ pax_task_size = SEGMEXEC_TASK_SIZE;
17157+#endif
17158+
17159+ pax_task_size -= PAGE_SIZE;
17160+
17161+ /* requested length too big for entire address space */
17162+ if (len > pax_task_size)
17163+ return -ENOMEM;
17164+
17165+ if (flags & MAP_FIXED)
17166+ return addr;
17167+
17168+#ifdef CONFIG_PAX_PAGEEXEC
17169+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17170+ goto bottomup;
17171+#endif
17172+
17173+#ifdef CONFIG_PAX_RANDMMAP
17174+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17175+#endif
17176+
17177+ /* requesting a specific address */
17178+ if (addr) {
17179+ addr = PAGE_ALIGN(addr);
17180+ if (pax_task_size - len >= addr) {
17181+ vma = find_vma(mm, addr);
17182+ if (check_heap_stack_gap(vma, addr, len))
17183+ return addr;
17184+ }
17185+ }
17186+
17187+ /* check if free_area_cache is useful for us */
17188+ if (len <= mm->cached_hole_size) {
17189+ mm->cached_hole_size = 0;
17190+ mm->free_area_cache = mm->mmap_base;
17191+ }
17192+
17193+ /* either no address requested or can't fit in requested address hole */
17194+ addr = mm->free_area_cache;
17195+
17196+ /* make sure it can fit in the remaining address space */
17197+ if (addr > len) {
17198+ vma = find_vma(mm, addr-len);
17199+ if (check_heap_stack_gap(vma, addr - len, len))
17200+ /* remember the address as a hint for next time */
17201+ return (mm->free_area_cache = addr-len);
17202+ }
17203+
17204+ if (mm->mmap_base < len)
17205+ goto bottomup;
17206+
17207+ addr = mm->mmap_base-len;
17208+
17209+ do {
17210+ /*
17211+ * Lookup failure means no vma is above this address,
17212+ * else if new region fits below vma->vm_start,
17213+ * return with success:
17214+ */
17215+ vma = find_vma(mm, addr);
17216+ if (check_heap_stack_gap(vma, addr, len))
17217+ /* remember the address as a hint for next time */
17218+ return (mm->free_area_cache = addr);
17219+
17220+ /* remember the largest hole we saw so far */
17221+ if (addr + mm->cached_hole_size < vma->vm_start)
17222+ mm->cached_hole_size = vma->vm_start - addr;
17223+
17224+ /* try just below the current vma->vm_start */
17225+ addr = skip_heap_stack_gap(vma, len);
17226+ } while (!IS_ERR_VALUE(addr));
17227+
17228+bottomup:
17229+ /*
17230+ * A failed mmap() very likely causes application failure,
17231+ * so fall back to the bottom-up function here. This scenario
17232+ * can happen with large stack limits and large mmap()
17233+ * allocations.
17234+ */
17235+
17236+#ifdef CONFIG_PAX_SEGMEXEC
17237+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17238+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17239+ else
17240+#endif
17241+
17242+ mm->mmap_base = TASK_UNMAPPED_BASE;
17243+
17244+#ifdef CONFIG_PAX_RANDMMAP
17245+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17246+ mm->mmap_base += mm->delta_mmap;
17247+#endif
17248+
17249+ mm->free_area_cache = mm->mmap_base;
17250+ mm->cached_hole_size = ~0UL;
17251+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17252+ /*
17253+ * Restore the topdown base:
17254+ */
17255+ mm->mmap_base = base;
17256+ mm->free_area_cache = base;
17257+ mm->cached_hole_size = ~0UL;
17258+
17259+ return addr;
17260 }
17261diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17262index 0514890..3dbebce 100644
17263--- a/arch/x86/kernel/sys_x86_64.c
17264+++ b/arch/x86/kernel/sys_x86_64.c
17265@@ -95,8 +95,8 @@ out:
17266 return error;
17267 }
17268
17269-static void find_start_end(unsigned long flags, unsigned long *begin,
17270- unsigned long *end)
17271+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17272+ unsigned long *begin, unsigned long *end)
17273 {
17274 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17275 unsigned long new_begin;
17276@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17277 *begin = new_begin;
17278 }
17279 } else {
17280- *begin = TASK_UNMAPPED_BASE;
17281+ *begin = mm->mmap_base;
17282 *end = TASK_SIZE;
17283 }
17284 }
17285@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17286 if (flags & MAP_FIXED)
17287 return addr;
17288
17289- find_start_end(flags, &begin, &end);
17290+ find_start_end(mm, flags, &begin, &end);
17291
17292 if (len > end)
17293 return -ENOMEM;
17294
17295+#ifdef CONFIG_PAX_RANDMMAP
17296+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17297+#endif
17298+
17299 if (addr) {
17300 addr = PAGE_ALIGN(addr);
17301 vma = find_vma(mm, addr);
17302- if (end - len >= addr &&
17303- (!vma || addr + len <= vma->vm_start))
17304+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17305 return addr;
17306 }
17307 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17308@@ -172,7 +175,7 @@ full_search:
17309 }
17310 return -ENOMEM;
17311 }
17312- if (!vma || addr + len <= vma->vm_start) {
17313+ if (check_heap_stack_gap(vma, addr, len)) {
17314 /*
17315 * Remember the place where we stopped the search:
17316 */
17317@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17318 {
17319 struct vm_area_struct *vma;
17320 struct mm_struct *mm = current->mm;
17321- unsigned long addr = addr0;
17322+ unsigned long base = mm->mmap_base, addr = addr0;
17323
17324 /* requested length too big for entire address space */
17325 if (len > TASK_SIZE)
17326@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17327 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17328 goto bottomup;
17329
17330+#ifdef CONFIG_PAX_RANDMMAP
17331+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17332+#endif
17333+
17334 /* requesting a specific address */
17335 if (addr) {
17336 addr = PAGE_ALIGN(addr);
17337- vma = find_vma(mm, addr);
17338- if (TASK_SIZE - len >= addr &&
17339- (!vma || addr + len <= vma->vm_start))
17340- return addr;
17341+ if (TASK_SIZE - len >= addr) {
17342+ vma = find_vma(mm, addr);
17343+ if (check_heap_stack_gap(vma, addr, len))
17344+ return addr;
17345+ }
17346 }
17347
17348 /* check if free_area_cache is useful for us */
17349@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17350 ALIGN_TOPDOWN);
17351
17352 vma = find_vma(mm, tmp_addr);
17353- if (!vma || tmp_addr + len <= vma->vm_start)
17354+ if (check_heap_stack_gap(vma, tmp_addr, len))
17355 /* remember the address as a hint for next time */
17356 return mm->free_area_cache = tmp_addr;
17357 }
17358@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17359 * return with success:
17360 */
17361 vma = find_vma(mm, addr);
17362- if (!vma || addr+len <= vma->vm_start)
17363+ if (check_heap_stack_gap(vma, addr, len))
17364 /* remember the address as a hint for next time */
17365 return mm->free_area_cache = addr;
17366
17367@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17368 mm->cached_hole_size = vma->vm_start - addr;
17369
17370 /* try just below the current vma->vm_start */
17371- addr = vma->vm_start-len;
17372- } while (len < vma->vm_start);
17373+ addr = skip_heap_stack_gap(vma, len);
17374+ } while (!IS_ERR_VALUE(addr));
17375
17376 bottomup:
17377 /*
17378@@ -270,13 +278,21 @@ bottomup:
17379 * can happen with large stack limits and large mmap()
17380 * allocations.
17381 */
17382+ mm->mmap_base = TASK_UNMAPPED_BASE;
17383+
17384+#ifdef CONFIG_PAX_RANDMMAP
17385+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17386+ mm->mmap_base += mm->delta_mmap;
17387+#endif
17388+
17389+ mm->free_area_cache = mm->mmap_base;
17390 mm->cached_hole_size = ~0UL;
17391- mm->free_area_cache = TASK_UNMAPPED_BASE;
17392 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17393 /*
17394 * Restore the topdown base:
17395 */
17396- mm->free_area_cache = mm->mmap_base;
17397+ mm->mmap_base = base;
17398+ mm->free_area_cache = base;
17399 mm->cached_hole_size = ~0UL;
17400
17401 return addr;
17402diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17403index 9a0e312..e6f66f2 100644
17404--- a/arch/x86/kernel/syscall_table_32.S
17405+++ b/arch/x86/kernel/syscall_table_32.S
17406@@ -1,3 +1,4 @@
17407+.section .rodata,"a",@progbits
17408 ENTRY(sys_call_table)
17409 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17410 .long sys_exit
17411diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17412index e2410e2..4fe3fbc 100644
17413--- a/arch/x86/kernel/tboot.c
17414+++ b/arch/x86/kernel/tboot.c
17415@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
17416
17417 void tboot_shutdown(u32 shutdown_type)
17418 {
17419- void (*shutdown)(void);
17420+ void (* __noreturn shutdown)(void);
17421
17422 if (!tboot_enabled())
17423 return;
17424@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
17425
17426 switch_to_tboot_pt();
17427
17428- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17429+ shutdown = (void *)tboot->shutdown_entry;
17430 shutdown();
17431
17432 /* should not reach here */
17433@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17434 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17435 }
17436
17437-static atomic_t ap_wfs_count;
17438+static atomic_unchecked_t ap_wfs_count;
17439
17440 static int tboot_wait_for_aps(int num_aps)
17441 {
17442@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17443 {
17444 switch (action) {
17445 case CPU_DYING:
17446- atomic_inc(&ap_wfs_count);
17447+ atomic_inc_unchecked(&ap_wfs_count);
17448 if (num_online_cpus() == 1)
17449- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17450+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17451 return NOTIFY_BAD;
17452 break;
17453 }
17454@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
17455
17456 tboot_create_trampoline();
17457
17458- atomic_set(&ap_wfs_count, 0);
17459+ atomic_set_unchecked(&ap_wfs_count, 0);
17460 register_hotcpu_notifier(&tboot_cpu_notifier);
17461 return 0;
17462 }
17463diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17464index dd5fbf4..b7f2232 100644
17465--- a/arch/x86/kernel/time.c
17466+++ b/arch/x86/kernel/time.c
17467@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17468 {
17469 unsigned long pc = instruction_pointer(regs);
17470
17471- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17472+ if (!user_mode(regs) && in_lock_functions(pc)) {
17473 #ifdef CONFIG_FRAME_POINTER
17474- return *(unsigned long *)(regs->bp + sizeof(long));
17475+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17476 #else
17477 unsigned long *sp =
17478 (unsigned long *)kernel_stack_pointer(regs);
17479@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17480 * or above a saved flags. Eflags has bits 22-31 zero,
17481 * kernel addresses don't.
17482 */
17483+
17484+#ifdef CONFIG_PAX_KERNEXEC
17485+ return ktla_ktva(sp[0]);
17486+#else
17487 if (sp[0] >> 22)
17488 return sp[0];
17489 if (sp[1] >> 22)
17490 return sp[1];
17491 #endif
17492+
17493+#endif
17494 }
17495 return pc;
17496 }
17497diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17498index 6bb7b85..dd853e1 100644
17499--- a/arch/x86/kernel/tls.c
17500+++ b/arch/x86/kernel/tls.c
17501@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17502 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17503 return -EINVAL;
17504
17505+#ifdef CONFIG_PAX_SEGMEXEC
17506+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17507+ return -EINVAL;
17508+#endif
17509+
17510 set_tls_desc(p, idx, &info, 1);
17511
17512 return 0;
17513diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17514index 451c0a7..e57f551 100644
17515--- a/arch/x86/kernel/trampoline_32.S
17516+++ b/arch/x86/kernel/trampoline_32.S
17517@@ -32,6 +32,12 @@
17518 #include <asm/segment.h>
17519 #include <asm/page_types.h>
17520
17521+#ifdef CONFIG_PAX_KERNEXEC
17522+#define ta(X) (X)
17523+#else
17524+#define ta(X) ((X) - __PAGE_OFFSET)
17525+#endif
17526+
17527 #ifdef CONFIG_SMP
17528
17529 .section ".x86_trampoline","a"
17530@@ -62,7 +68,7 @@ r_base = .
17531 inc %ax # protected mode (PE) bit
17532 lmsw %ax # into protected mode
17533 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17534- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17535+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17536
17537 # These need to be in the same 64K segment as the above;
17538 # hence we don't use the boot_gdt_descr defined in head.S
17539diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17540index 09ff517..df19fbff 100644
17541--- a/arch/x86/kernel/trampoline_64.S
17542+++ b/arch/x86/kernel/trampoline_64.S
17543@@ -90,7 +90,7 @@ startup_32:
17544 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17545 movl %eax, %ds
17546
17547- movl $X86_CR4_PAE, %eax
17548+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17549 movl %eax, %cr4 # Enable PAE mode
17550
17551 # Setup trampoline 4 level pagetables
17552@@ -138,7 +138,7 @@ tidt:
17553 # so the kernel can live anywhere
17554 .balign 4
17555 tgdt:
17556- .short tgdt_end - tgdt # gdt limit
17557+ .short tgdt_end - tgdt - 1 # gdt limit
17558 .long tgdt - r_base
17559 .short 0
17560 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17561diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17562index a8e3eb8..c9dbd7d 100644
17563--- a/arch/x86/kernel/traps.c
17564+++ b/arch/x86/kernel/traps.c
17565@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17566
17567 /* Do we ignore FPU interrupts ? */
17568 char ignore_fpu_irq;
17569-
17570-/*
17571- * The IDT has to be page-aligned to simplify the Pentium
17572- * F0 0F bug workaround.
17573- */
17574-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17575 #endif
17576
17577 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17578@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17579 }
17580
17581 static void __kprobes
17582-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17583+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17584 long error_code, siginfo_t *info)
17585 {
17586 struct task_struct *tsk = current;
17587
17588 #ifdef CONFIG_X86_32
17589- if (regs->flags & X86_VM_MASK) {
17590+ if (v8086_mode(regs)) {
17591 /*
17592 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17593 * On nmi (interrupt 2), do_trap should not be called.
17594@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17595 }
17596 #endif
17597
17598- if (!user_mode(regs))
17599+ if (!user_mode_novm(regs))
17600 goto kernel_trap;
17601
17602 #ifdef CONFIG_X86_32
17603@@ -148,7 +142,7 @@ trap_signal:
17604 printk_ratelimit()) {
17605 printk(KERN_INFO
17606 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17607- tsk->comm, tsk->pid, str,
17608+ tsk->comm, task_pid_nr(tsk), str,
17609 regs->ip, regs->sp, error_code);
17610 print_vma_addr(" in ", regs->ip);
17611 printk("\n");
17612@@ -165,8 +159,20 @@ kernel_trap:
17613 if (!fixup_exception(regs)) {
17614 tsk->thread.error_code = error_code;
17615 tsk->thread.trap_no = trapnr;
17616+
17617+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17618+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17619+ str = "PAX: suspicious stack segment fault";
17620+#endif
17621+
17622 die(str, regs, error_code);
17623 }
17624+
17625+#ifdef CONFIG_PAX_REFCOUNT
17626+ if (trapnr == 4)
17627+ pax_report_refcount_overflow(regs);
17628+#endif
17629+
17630 return;
17631
17632 #ifdef CONFIG_X86_32
17633@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17634 conditional_sti(regs);
17635
17636 #ifdef CONFIG_X86_32
17637- if (regs->flags & X86_VM_MASK)
17638+ if (v8086_mode(regs))
17639 goto gp_in_vm86;
17640 #endif
17641
17642 tsk = current;
17643- if (!user_mode(regs))
17644+ if (!user_mode_novm(regs))
17645 goto gp_in_kernel;
17646
17647+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17648+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17649+ struct mm_struct *mm = tsk->mm;
17650+ unsigned long limit;
17651+
17652+ down_write(&mm->mmap_sem);
17653+ limit = mm->context.user_cs_limit;
17654+ if (limit < TASK_SIZE) {
17655+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17656+ up_write(&mm->mmap_sem);
17657+ return;
17658+ }
17659+ up_write(&mm->mmap_sem);
17660+ }
17661+#endif
17662+
17663 tsk->thread.error_code = error_code;
17664 tsk->thread.trap_no = 13;
17665
17666@@ -295,6 +317,13 @@ gp_in_kernel:
17667 if (notify_die(DIE_GPF, "general protection fault", regs,
17668 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17669 return;
17670+
17671+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17672+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17673+ die("PAX: suspicious general protection fault", regs, error_code);
17674+ else
17675+#endif
17676+
17677 die("general protection fault", regs, error_code);
17678 }
17679
17680@@ -414,7 +443,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17681 /* It's safe to allow irq's after DR6 has been saved */
17682 preempt_conditional_sti(regs);
17683
17684- if (regs->flags & X86_VM_MASK) {
17685+ if (v8086_mode(regs)) {
17686 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17687 error_code, 1);
17688 preempt_conditional_cli(regs);
17689@@ -428,7 +457,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17690 * We already checked v86 mode above, so we can check for kernel mode
17691 * by just checking the CPL of CS.
17692 */
17693- if ((dr6 & DR_STEP) && !user_mode(regs)) {
17694+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17695 tsk->thread.debugreg6 &= ~DR_STEP;
17696 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17697 regs->flags &= ~X86_EFLAGS_TF;
17698@@ -457,7 +486,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17699 return;
17700 conditional_sti(regs);
17701
17702- if (!user_mode_vm(regs))
17703+ if (!user_mode(regs))
17704 {
17705 if (!fixup_exception(regs)) {
17706 task->thread.error_code = error_code;
17707@@ -568,7 +597,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17708 void __math_state_restore(void)
17709 {
17710 struct thread_info *thread = current_thread_info();
17711- struct task_struct *tsk = thread->task;
17712+ struct task_struct *tsk = current;
17713
17714 /*
17715 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17716@@ -595,8 +624,7 @@ void __math_state_restore(void)
17717 */
17718 asmlinkage void math_state_restore(void)
17719 {
17720- struct thread_info *thread = current_thread_info();
17721- struct task_struct *tsk = thread->task;
17722+ struct task_struct *tsk = current;
17723
17724 if (!tsk_used_math(tsk)) {
17725 local_irq_enable();
17726diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17727index b9242ba..50c5edd 100644
17728--- a/arch/x86/kernel/verify_cpu.S
17729+++ b/arch/x86/kernel/verify_cpu.S
17730@@ -20,6 +20,7 @@
17731 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17732 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17733 * arch/x86/kernel/head_32.S: processor startup
17734+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17735 *
17736 * verify_cpu, returns the status of longmode and SSE in register %eax.
17737 * 0: Success 1: Failure
17738diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17739index 863f875..4307295 100644
17740--- a/arch/x86/kernel/vm86_32.c
17741+++ b/arch/x86/kernel/vm86_32.c
17742@@ -41,6 +41,7 @@
17743 #include <linux/ptrace.h>
17744 #include <linux/audit.h>
17745 #include <linux/stddef.h>
17746+#include <linux/grsecurity.h>
17747
17748 #include <asm/uaccess.h>
17749 #include <asm/io.h>
17750@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17751 do_exit(SIGSEGV);
17752 }
17753
17754- tss = &per_cpu(init_tss, get_cpu());
17755+ tss = init_tss + get_cpu();
17756 current->thread.sp0 = current->thread.saved_sp0;
17757 current->thread.sysenter_cs = __KERNEL_CS;
17758 load_sp0(tss, &current->thread);
17759@@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17760 struct task_struct *tsk;
17761 int tmp, ret = -EPERM;
17762
17763+#ifdef CONFIG_GRKERNSEC_VM86
17764+ if (!capable(CAP_SYS_RAWIO)) {
17765+ gr_handle_vm86();
17766+ goto out;
17767+ }
17768+#endif
17769+
17770 tsk = current;
17771 if (tsk->thread.saved_sp0)
17772 goto out;
17773@@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17774 int tmp, ret;
17775 struct vm86plus_struct __user *v86;
17776
17777+#ifdef CONFIG_GRKERNSEC_VM86
17778+ if (!capable(CAP_SYS_RAWIO)) {
17779+ gr_handle_vm86();
17780+ ret = -EPERM;
17781+ goto out;
17782+ }
17783+#endif
17784+
17785 tsk = current;
17786 switch (cmd) {
17787 case VM86_REQUEST_IRQ:
17788@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17789 tsk->thread.saved_fs = info->regs32->fs;
17790 tsk->thread.saved_gs = get_user_gs(info->regs32);
17791
17792- tss = &per_cpu(init_tss, get_cpu());
17793+ tss = init_tss + get_cpu();
17794 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17795 if (cpu_has_sep)
17796 tsk->thread.sysenter_cs = 0;
17797@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17798 goto cannot_handle;
17799 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17800 goto cannot_handle;
17801- intr_ptr = (unsigned long __user *) (i << 2);
17802+ intr_ptr = (__force unsigned long __user *) (i << 2);
17803 if (get_user(segoffs, intr_ptr))
17804 goto cannot_handle;
17805 if ((segoffs >> 16) == BIOSSEG)
17806diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17807index 0f703f1..9e15f64 100644
17808--- a/arch/x86/kernel/vmlinux.lds.S
17809+++ b/arch/x86/kernel/vmlinux.lds.S
17810@@ -26,6 +26,13 @@
17811 #include <asm/page_types.h>
17812 #include <asm/cache.h>
17813 #include <asm/boot.h>
17814+#include <asm/segment.h>
17815+
17816+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17817+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17818+#else
17819+#define __KERNEL_TEXT_OFFSET 0
17820+#endif
17821
17822 #undef i386 /* in case the preprocessor is a 32bit one */
17823
17824@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17825
17826 PHDRS {
17827 text PT_LOAD FLAGS(5); /* R_E */
17828+#ifdef CONFIG_X86_32
17829+ module PT_LOAD FLAGS(5); /* R_E */
17830+#endif
17831+#ifdef CONFIG_XEN
17832+ rodata PT_LOAD FLAGS(5); /* R_E */
17833+#else
17834+ rodata PT_LOAD FLAGS(4); /* R__ */
17835+#endif
17836 data PT_LOAD FLAGS(6); /* RW_ */
17837-#ifdef CONFIG_X86_64
17838+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17839 #ifdef CONFIG_SMP
17840 percpu PT_LOAD FLAGS(6); /* RW_ */
17841 #endif
17842+ text.init PT_LOAD FLAGS(5); /* R_E */
17843+ text.exit PT_LOAD FLAGS(5); /* R_E */
17844 init PT_LOAD FLAGS(7); /* RWE */
17845-#endif
17846 note PT_NOTE FLAGS(0); /* ___ */
17847 }
17848
17849 SECTIONS
17850 {
17851 #ifdef CONFIG_X86_32
17852- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17853- phys_startup_32 = startup_32 - LOAD_OFFSET;
17854+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17855 #else
17856- . = __START_KERNEL;
17857- phys_startup_64 = startup_64 - LOAD_OFFSET;
17858+ . = __START_KERNEL;
17859 #endif
17860
17861 /* Text and read-only data */
17862- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17863- _text = .;
17864+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17865 /* bootstrapping code */
17866+#ifdef CONFIG_X86_32
17867+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17868+#else
17869+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17870+#endif
17871+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17872+ _text = .;
17873 HEAD_TEXT
17874 #ifdef CONFIG_X86_32
17875 . = ALIGN(PAGE_SIZE);
17876@@ -108,13 +128,47 @@ SECTIONS
17877 IRQENTRY_TEXT
17878 *(.fixup)
17879 *(.gnu.warning)
17880- /* End of text section */
17881- _etext = .;
17882 } :text = 0x9090
17883
17884- NOTES :text :note
17885+ . += __KERNEL_TEXT_OFFSET;
17886
17887- EXCEPTION_TABLE(16) :text = 0x9090
17888+#ifdef CONFIG_X86_32
17889+ . = ALIGN(PAGE_SIZE);
17890+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17891+
17892+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17893+ MODULES_EXEC_VADDR = .;
17894+ BYTE(0)
17895+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17896+ . = ALIGN(HPAGE_SIZE);
17897+ MODULES_EXEC_END = . - 1;
17898+#endif
17899+
17900+ } :module
17901+#endif
17902+
17903+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17904+ /* End of text section */
17905+ _etext = . - __KERNEL_TEXT_OFFSET;
17906+ }
17907+
17908+#ifdef CONFIG_X86_32
17909+ . = ALIGN(PAGE_SIZE);
17910+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17911+ *(.idt)
17912+ . = ALIGN(PAGE_SIZE);
17913+ *(.empty_zero_page)
17914+ *(.initial_pg_fixmap)
17915+ *(.initial_pg_pmd)
17916+ *(.initial_page_table)
17917+ *(.swapper_pg_dir)
17918+ } :rodata
17919+#endif
17920+
17921+ . = ALIGN(PAGE_SIZE);
17922+ NOTES :rodata :note
17923+
17924+ EXCEPTION_TABLE(16) :rodata
17925
17926 #if defined(CONFIG_DEBUG_RODATA)
17927 /* .text should occupy whole number of pages */
17928@@ -126,16 +180,20 @@ SECTIONS
17929
17930 /* Data */
17931 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17932+
17933+#ifdef CONFIG_PAX_KERNEXEC
17934+ . = ALIGN(HPAGE_SIZE);
17935+#else
17936+ . = ALIGN(PAGE_SIZE);
17937+#endif
17938+
17939 /* Start of data section */
17940 _sdata = .;
17941
17942 /* init_task */
17943 INIT_TASK_DATA(THREAD_SIZE)
17944
17945-#ifdef CONFIG_X86_32
17946- /* 32 bit has nosave before _edata */
17947 NOSAVE_DATA
17948-#endif
17949
17950 PAGE_ALIGNED_DATA(PAGE_SIZE)
17951
17952@@ -176,12 +234,19 @@ SECTIONS
17953 #endif /* CONFIG_X86_64 */
17954
17955 /* Init code and data - will be freed after init */
17956- . = ALIGN(PAGE_SIZE);
17957 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17958+ BYTE(0)
17959+
17960+#ifdef CONFIG_PAX_KERNEXEC
17961+ . = ALIGN(HPAGE_SIZE);
17962+#else
17963+ . = ALIGN(PAGE_SIZE);
17964+#endif
17965+
17966 __init_begin = .; /* paired with __init_end */
17967- }
17968+ } :init.begin
17969
17970-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17971+#ifdef CONFIG_SMP
17972 /*
17973 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17974 * output PHDR, so the next output section - .init.text - should
17975@@ -190,12 +255,27 @@ SECTIONS
17976 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17977 #endif
17978
17979- INIT_TEXT_SECTION(PAGE_SIZE)
17980-#ifdef CONFIG_X86_64
17981- :init
17982-#endif
17983+ . = ALIGN(PAGE_SIZE);
17984+ init_begin = .;
17985+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17986+ VMLINUX_SYMBOL(_sinittext) = .;
17987+ INIT_TEXT
17988+ VMLINUX_SYMBOL(_einittext) = .;
17989+ . = ALIGN(PAGE_SIZE);
17990+ } :text.init
17991
17992- INIT_DATA_SECTION(16)
17993+ /*
17994+ * .exit.text is discard at runtime, not link time, to deal with
17995+ * references from .altinstructions and .eh_frame
17996+ */
17997+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17998+ EXIT_TEXT
17999+ . = ALIGN(16);
18000+ } :text.exit
18001+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18002+
18003+ . = ALIGN(PAGE_SIZE);
18004+ INIT_DATA_SECTION(16) :init
18005
18006 /*
18007 * Code and data for a variety of lowlevel trampolines, to be
18008@@ -269,19 +349,12 @@ SECTIONS
18009 }
18010
18011 . = ALIGN(8);
18012- /*
18013- * .exit.text is discard at runtime, not link time, to deal with
18014- * references from .altinstructions and .eh_frame
18015- */
18016- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18017- EXIT_TEXT
18018- }
18019
18020 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18021 EXIT_DATA
18022 }
18023
18024-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18025+#ifndef CONFIG_SMP
18026 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
18027 #endif
18028
18029@@ -300,16 +373,10 @@ SECTIONS
18030 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
18031 __smp_locks = .;
18032 *(.smp_locks)
18033- . = ALIGN(PAGE_SIZE);
18034 __smp_locks_end = .;
18035+ . = ALIGN(PAGE_SIZE);
18036 }
18037
18038-#ifdef CONFIG_X86_64
18039- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18040- NOSAVE_DATA
18041- }
18042-#endif
18043-
18044 /* BSS */
18045 . = ALIGN(PAGE_SIZE);
18046 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18047@@ -325,6 +392,7 @@ SECTIONS
18048 __brk_base = .;
18049 . += 64 * 1024; /* 64k alignment slop space */
18050 *(.brk_reservation) /* areas brk users have reserved */
18051+ . = ALIGN(HPAGE_SIZE);
18052 __brk_limit = .;
18053 }
18054
18055@@ -351,13 +419,12 @@ SECTIONS
18056 * for the boot processor.
18057 */
18058 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18059-INIT_PER_CPU(gdt_page);
18060 INIT_PER_CPU(irq_stack_union);
18061
18062 /*
18063 * Build-time check on the image size:
18064 */
18065-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18066+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18067 "kernel image bigger than KERNEL_IMAGE_SIZE");
18068
18069 #ifdef CONFIG_SMP
18070diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18071index e4d4a22..47ee71f 100644
18072--- a/arch/x86/kernel/vsyscall_64.c
18073+++ b/arch/x86/kernel/vsyscall_64.c
18074@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18075 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18076 };
18077
18078-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18079+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18080
18081 static int __init vsyscall_setup(char *str)
18082 {
18083 if (str) {
18084 if (!strcmp("emulate", str))
18085 vsyscall_mode = EMULATE;
18086- else if (!strcmp("native", str))
18087- vsyscall_mode = NATIVE;
18088 else if (!strcmp("none", str))
18089 vsyscall_mode = NONE;
18090 else
18091@@ -178,7 +176,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18092
18093 tsk = current;
18094 if (seccomp_mode(&tsk->seccomp))
18095- do_exit(SIGKILL);
18096+ do_group_exit(SIGKILL);
18097
18098 switch (vsyscall_nr) {
18099 case 0:
18100@@ -220,8 +218,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18101 return true;
18102
18103 sigsegv:
18104- force_sig(SIGSEGV, current);
18105- return true;
18106+ do_group_exit(SIGKILL);
18107 }
18108
18109 /*
18110@@ -274,10 +271,7 @@ void __init map_vsyscall(void)
18111 extern char __vvar_page;
18112 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18113
18114- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18115- vsyscall_mode == NATIVE
18116- ? PAGE_KERNEL_VSYSCALL
18117- : PAGE_KERNEL_VVAR);
18118+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18119 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18120 (unsigned long)VSYSCALL_START);
18121
18122diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18123index 9796c2f..f686fbf 100644
18124--- a/arch/x86/kernel/x8664_ksyms_64.c
18125+++ b/arch/x86/kernel/x8664_ksyms_64.c
18126@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18127 EXPORT_SYMBOL(copy_user_generic_string);
18128 EXPORT_SYMBOL(copy_user_generic_unrolled);
18129 EXPORT_SYMBOL(__copy_user_nocache);
18130-EXPORT_SYMBOL(_copy_from_user);
18131-EXPORT_SYMBOL(_copy_to_user);
18132
18133 EXPORT_SYMBOL(copy_page);
18134 EXPORT_SYMBOL(clear_page);
18135diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18136index a391134..d0b63b6e 100644
18137--- a/arch/x86/kernel/xsave.c
18138+++ b/arch/x86/kernel/xsave.c
18139@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18140 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18141 return -EINVAL;
18142
18143- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18144+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18145 fx_sw_user->extended_size -
18146 FP_XSTATE_MAGIC2_SIZE));
18147 if (err)
18148@@ -267,7 +267,7 @@ fx_only:
18149 * the other extended state.
18150 */
18151 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18152- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18153+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18154 }
18155
18156 /*
18157@@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18158 if (use_xsave())
18159 err = restore_user_xstate(buf);
18160 else
18161- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18162+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18163 buf);
18164 if (unlikely(err)) {
18165 /*
18166diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18167index f1e3be1..588efc8 100644
18168--- a/arch/x86/kvm/emulate.c
18169+++ b/arch/x86/kvm/emulate.c
18170@@ -249,6 +249,7 @@ struct gprefix {
18171
18172 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
18173 do { \
18174+ unsigned long _tmp; \
18175 __asm__ __volatile__ ( \
18176 _PRE_EFLAGS("0", "4", "2") \
18177 _op _suffix " %"_x"3,%1; " \
18178@@ -263,8 +264,6 @@ struct gprefix {
18179 /* Raw emulation: instruction has two explicit operands. */
18180 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
18181 do { \
18182- unsigned long _tmp; \
18183- \
18184 switch ((ctxt)->dst.bytes) { \
18185 case 2: \
18186 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
18187@@ -280,7 +279,6 @@ struct gprefix {
18188
18189 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18190 do { \
18191- unsigned long _tmp; \
18192 switch ((ctxt)->dst.bytes) { \
18193 case 1: \
18194 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
18195diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18196index 54abb40..a192606 100644
18197--- a/arch/x86/kvm/lapic.c
18198+++ b/arch/x86/kvm/lapic.c
18199@@ -53,7 +53,7 @@
18200 #define APIC_BUS_CYCLE_NS 1
18201
18202 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18203-#define apic_debug(fmt, arg...)
18204+#define apic_debug(fmt, arg...) do {} while (0)
18205
18206 #define APIC_LVT_NUM 6
18207 /* 14 is the version for Xeon and Pentium 8.4.8*/
18208diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18209index f1b36cf..af8a124 100644
18210--- a/arch/x86/kvm/mmu.c
18211+++ b/arch/x86/kvm/mmu.c
18212@@ -3555,7 +3555,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18213
18214 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18215
18216- invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18217+ invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18218
18219 /*
18220 * Assume that the pte write on a page table of the same type
18221@@ -3587,7 +3587,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18222 }
18223
18224 spin_lock(&vcpu->kvm->mmu_lock);
18225- if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18226+ if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18227 gentry = 0;
18228 kvm_mmu_free_some_pages(vcpu);
18229 ++vcpu->kvm->stat.mmu_pte_write;
18230diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18231index 9299410..ade2f9b 100644
18232--- a/arch/x86/kvm/paging_tmpl.h
18233+++ b/arch/x86/kvm/paging_tmpl.h
18234@@ -197,7 +197,7 @@ retry_walk:
18235 if (unlikely(kvm_is_error_hva(host_addr)))
18236 goto error;
18237
18238- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18239+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18240 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18241 goto error;
18242
18243@@ -705,7 +705,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18244 if (need_flush)
18245 kvm_flush_remote_tlbs(vcpu->kvm);
18246
18247- atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18248+ atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18249
18250 spin_unlock(&vcpu->kvm->mmu_lock);
18251
18252diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18253index e32243e..a6e6172 100644
18254--- a/arch/x86/kvm/svm.c
18255+++ b/arch/x86/kvm/svm.c
18256@@ -3400,7 +3400,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18257 int cpu = raw_smp_processor_id();
18258
18259 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18260+
18261+ pax_open_kernel();
18262 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18263+ pax_close_kernel();
18264+
18265 load_TR_desc();
18266 }
18267
18268@@ -3778,6 +3782,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18269 #endif
18270 #endif
18271
18272+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18273+ __set_fs(current_thread_info()->addr_limit);
18274+#endif
18275+
18276 reload_tss(vcpu);
18277
18278 local_irq_disable();
18279diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18280index 579a0b5..ed7bbf9 100644
18281--- a/arch/x86/kvm/vmx.c
18282+++ b/arch/x86/kvm/vmx.c
18283@@ -1305,7 +1305,11 @@ static void reload_tss(void)
18284 struct desc_struct *descs;
18285
18286 descs = (void *)gdt->address;
18287+
18288+ pax_open_kernel();
18289 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18290+ pax_close_kernel();
18291+
18292 load_TR_desc();
18293 }
18294
18295@@ -2633,8 +2637,11 @@ static __init int hardware_setup(void)
18296 if (!cpu_has_vmx_flexpriority())
18297 flexpriority_enabled = 0;
18298
18299- if (!cpu_has_vmx_tpr_shadow())
18300- kvm_x86_ops->update_cr8_intercept = NULL;
18301+ if (!cpu_has_vmx_tpr_shadow()) {
18302+ pax_open_kernel();
18303+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18304+ pax_close_kernel();
18305+ }
18306
18307 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18308 kvm_disable_largepages();
18309@@ -3648,7 +3655,7 @@ static void vmx_set_constant_host_state(void)
18310 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18311
18312 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18313- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18314+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18315
18316 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18317 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18318@@ -6169,6 +6176,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18319 "jmp .Lkvm_vmx_return \n\t"
18320 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18321 ".Lkvm_vmx_return: "
18322+
18323+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18324+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18325+ ".Lkvm_vmx_return2: "
18326+#endif
18327+
18328 /* Save guest registers, load host registers, keep flags */
18329 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18330 "pop %0 \n\t"
18331@@ -6217,6 +6230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18332 #endif
18333 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18334 [wordsize]"i"(sizeof(ulong))
18335+
18336+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18337+ ,[cs]"i"(__KERNEL_CS)
18338+#endif
18339+
18340 : "cc", "memory"
18341 , R"ax", R"bx", R"di", R"si"
18342 #ifdef CONFIG_X86_64
18343@@ -6245,7 +6263,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18344 }
18345 }
18346
18347- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18348+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18349+
18350+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18351+ loadsegment(fs, __KERNEL_PERCPU);
18352+#endif
18353+
18354+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18355+ __set_fs(current_thread_info()->addr_limit);
18356+#endif
18357+
18358 vmx->loaded_vmcs->launched = 1;
18359
18360 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18361diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18362index 4c938da..4ddef65 100644
18363--- a/arch/x86/kvm/x86.c
18364+++ b/arch/x86/kvm/x86.c
18365@@ -1345,8 +1345,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18366 {
18367 struct kvm *kvm = vcpu->kvm;
18368 int lm = is_long_mode(vcpu);
18369- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18370- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18371+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18372+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18373 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18374 : kvm->arch.xen_hvm_config.blob_size_32;
18375 u32 page_num = data & ~PAGE_MASK;
18376@@ -2165,6 +2165,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18377 if (n < msr_list.nmsrs)
18378 goto out;
18379 r = -EFAULT;
18380+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18381+ goto out;
18382 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18383 num_msrs_to_save * sizeof(u32)))
18384 goto out;
18385@@ -2340,15 +2342,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18386 struct kvm_cpuid2 *cpuid,
18387 struct kvm_cpuid_entry2 __user *entries)
18388 {
18389- int r;
18390+ int r, i;
18391
18392 r = -E2BIG;
18393 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18394 goto out;
18395 r = -EFAULT;
18396- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18397- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18398+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18399 goto out;
18400+ for (i = 0; i < cpuid->nent; ++i) {
18401+ struct kvm_cpuid_entry2 cpuid_entry;
18402+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18403+ goto out;
18404+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18405+ }
18406 vcpu->arch.cpuid_nent = cpuid->nent;
18407 kvm_apic_set_version(vcpu);
18408 kvm_x86_ops->cpuid_update(vcpu);
18409@@ -2363,15 +2370,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18410 struct kvm_cpuid2 *cpuid,
18411 struct kvm_cpuid_entry2 __user *entries)
18412 {
18413- int r;
18414+ int r, i;
18415
18416 r = -E2BIG;
18417 if (cpuid->nent < vcpu->arch.cpuid_nent)
18418 goto out;
18419 r = -EFAULT;
18420- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18421- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18422+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18423 goto out;
18424+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18425+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18426+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18427+ goto out;
18428+ }
18429 return 0;
18430
18431 out:
18432@@ -2746,7 +2757,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18433 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18434 struct kvm_interrupt *irq)
18435 {
18436- if (irq->irq < 0 || irq->irq >= 256)
18437+ if (irq->irq >= 256)
18438 return -EINVAL;
18439 if (irqchip_in_kernel(vcpu->kvm))
18440 return -ENXIO;
18441@@ -5162,7 +5173,7 @@ static void kvm_set_mmio_spte_mask(void)
18442 kvm_mmu_set_mmio_spte_mask(mask);
18443 }
18444
18445-int kvm_arch_init(void *opaque)
18446+int kvm_arch_init(const void *opaque)
18447 {
18448 int r;
18449 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18450diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18451index cf4603b..7cdde38 100644
18452--- a/arch/x86/lguest/boot.c
18453+++ b/arch/x86/lguest/boot.c
18454@@ -1195,9 +1195,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18455 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18456 * Launcher to reboot us.
18457 */
18458-static void lguest_restart(char *reason)
18459+static __noreturn void lguest_restart(char *reason)
18460 {
18461 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18462+ BUG();
18463 }
18464
18465 /*G:050
18466diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18467index 042f682..c92afb6 100644
18468--- a/arch/x86/lib/atomic64_32.c
18469+++ b/arch/x86/lib/atomic64_32.c
18470@@ -8,18 +8,30 @@
18471
18472 long long atomic64_read_cx8(long long, const atomic64_t *v);
18473 EXPORT_SYMBOL(atomic64_read_cx8);
18474+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18475+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18476 long long atomic64_set_cx8(long long, const atomic64_t *v);
18477 EXPORT_SYMBOL(atomic64_set_cx8);
18478+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18479+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18480 long long atomic64_xchg_cx8(long long, unsigned high);
18481 EXPORT_SYMBOL(atomic64_xchg_cx8);
18482 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18483 EXPORT_SYMBOL(atomic64_add_return_cx8);
18484+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18485+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18486 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18487 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18488+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18489+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18490 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18491 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18492+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18493+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18494 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18495 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18496+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18497+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18498 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18499 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18500 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18501@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18502 #ifndef CONFIG_X86_CMPXCHG64
18503 long long atomic64_read_386(long long, const atomic64_t *v);
18504 EXPORT_SYMBOL(atomic64_read_386);
18505+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18506+EXPORT_SYMBOL(atomic64_read_unchecked_386);
18507 long long atomic64_set_386(long long, const atomic64_t *v);
18508 EXPORT_SYMBOL(atomic64_set_386);
18509+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18510+EXPORT_SYMBOL(atomic64_set_unchecked_386);
18511 long long atomic64_xchg_386(long long, unsigned high);
18512 EXPORT_SYMBOL(atomic64_xchg_386);
18513 long long atomic64_add_return_386(long long a, atomic64_t *v);
18514 EXPORT_SYMBOL(atomic64_add_return_386);
18515+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18516+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18517 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18518 EXPORT_SYMBOL(atomic64_sub_return_386);
18519+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18520+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18521 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18522 EXPORT_SYMBOL(atomic64_inc_return_386);
18523+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18524+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18525 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18526 EXPORT_SYMBOL(atomic64_dec_return_386);
18527+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18528+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18529 long long atomic64_add_386(long long a, atomic64_t *v);
18530 EXPORT_SYMBOL(atomic64_add_386);
18531+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18532+EXPORT_SYMBOL(atomic64_add_unchecked_386);
18533 long long atomic64_sub_386(long long a, atomic64_t *v);
18534 EXPORT_SYMBOL(atomic64_sub_386);
18535+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18536+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18537 long long atomic64_inc_386(long long a, atomic64_t *v);
18538 EXPORT_SYMBOL(atomic64_inc_386);
18539+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18540+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18541 long long atomic64_dec_386(long long a, atomic64_t *v);
18542 EXPORT_SYMBOL(atomic64_dec_386);
18543+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18544+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18545 long long atomic64_dec_if_positive_386(atomic64_t *v);
18546 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18547 int atomic64_inc_not_zero_386(atomic64_t *v);
18548diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18549index e8e7e0d..56fd1b0 100644
18550--- a/arch/x86/lib/atomic64_386_32.S
18551+++ b/arch/x86/lib/atomic64_386_32.S
18552@@ -48,6 +48,10 @@ BEGIN(read)
18553 movl (v), %eax
18554 movl 4(v), %edx
18555 RET_ENDP
18556+BEGIN(read_unchecked)
18557+ movl (v), %eax
18558+ movl 4(v), %edx
18559+RET_ENDP
18560 #undef v
18561
18562 #define v %esi
18563@@ -55,6 +59,10 @@ BEGIN(set)
18564 movl %ebx, (v)
18565 movl %ecx, 4(v)
18566 RET_ENDP
18567+BEGIN(set_unchecked)
18568+ movl %ebx, (v)
18569+ movl %ecx, 4(v)
18570+RET_ENDP
18571 #undef v
18572
18573 #define v %esi
18574@@ -70,6 +78,20 @@ RET_ENDP
18575 BEGIN(add)
18576 addl %eax, (v)
18577 adcl %edx, 4(v)
18578+
18579+#ifdef CONFIG_PAX_REFCOUNT
18580+ jno 0f
18581+ subl %eax, (v)
18582+ sbbl %edx, 4(v)
18583+ int $4
18584+0:
18585+ _ASM_EXTABLE(0b, 0b)
18586+#endif
18587+
18588+RET_ENDP
18589+BEGIN(add_unchecked)
18590+ addl %eax, (v)
18591+ adcl %edx, 4(v)
18592 RET_ENDP
18593 #undef v
18594
18595@@ -77,6 +99,24 @@ RET_ENDP
18596 BEGIN(add_return)
18597 addl (v), %eax
18598 adcl 4(v), %edx
18599+
18600+#ifdef CONFIG_PAX_REFCOUNT
18601+ into
18602+1234:
18603+ _ASM_EXTABLE(1234b, 2f)
18604+#endif
18605+
18606+ movl %eax, (v)
18607+ movl %edx, 4(v)
18608+
18609+#ifdef CONFIG_PAX_REFCOUNT
18610+2:
18611+#endif
18612+
18613+RET_ENDP
18614+BEGIN(add_return_unchecked)
18615+ addl (v), %eax
18616+ adcl 4(v), %edx
18617 movl %eax, (v)
18618 movl %edx, 4(v)
18619 RET_ENDP
18620@@ -86,6 +126,20 @@ RET_ENDP
18621 BEGIN(sub)
18622 subl %eax, (v)
18623 sbbl %edx, 4(v)
18624+
18625+#ifdef CONFIG_PAX_REFCOUNT
18626+ jno 0f
18627+ addl %eax, (v)
18628+ adcl %edx, 4(v)
18629+ int $4
18630+0:
18631+ _ASM_EXTABLE(0b, 0b)
18632+#endif
18633+
18634+RET_ENDP
18635+BEGIN(sub_unchecked)
18636+ subl %eax, (v)
18637+ sbbl %edx, 4(v)
18638 RET_ENDP
18639 #undef v
18640
18641@@ -96,6 +150,27 @@ BEGIN(sub_return)
18642 sbbl $0, %edx
18643 addl (v), %eax
18644 adcl 4(v), %edx
18645+
18646+#ifdef CONFIG_PAX_REFCOUNT
18647+ into
18648+1234:
18649+ _ASM_EXTABLE(1234b, 2f)
18650+#endif
18651+
18652+ movl %eax, (v)
18653+ movl %edx, 4(v)
18654+
18655+#ifdef CONFIG_PAX_REFCOUNT
18656+2:
18657+#endif
18658+
18659+RET_ENDP
18660+BEGIN(sub_return_unchecked)
18661+ negl %edx
18662+ negl %eax
18663+ sbbl $0, %edx
18664+ addl (v), %eax
18665+ adcl 4(v), %edx
18666 movl %eax, (v)
18667 movl %edx, 4(v)
18668 RET_ENDP
18669@@ -105,6 +180,20 @@ RET_ENDP
18670 BEGIN(inc)
18671 addl $1, (v)
18672 adcl $0, 4(v)
18673+
18674+#ifdef CONFIG_PAX_REFCOUNT
18675+ jno 0f
18676+ subl $1, (v)
18677+ sbbl $0, 4(v)
18678+ int $4
18679+0:
18680+ _ASM_EXTABLE(0b, 0b)
18681+#endif
18682+
18683+RET_ENDP
18684+BEGIN(inc_unchecked)
18685+ addl $1, (v)
18686+ adcl $0, 4(v)
18687 RET_ENDP
18688 #undef v
18689
18690@@ -114,6 +203,26 @@ BEGIN(inc_return)
18691 movl 4(v), %edx
18692 addl $1, %eax
18693 adcl $0, %edx
18694+
18695+#ifdef CONFIG_PAX_REFCOUNT
18696+ into
18697+1234:
18698+ _ASM_EXTABLE(1234b, 2f)
18699+#endif
18700+
18701+ movl %eax, (v)
18702+ movl %edx, 4(v)
18703+
18704+#ifdef CONFIG_PAX_REFCOUNT
18705+2:
18706+#endif
18707+
18708+RET_ENDP
18709+BEGIN(inc_return_unchecked)
18710+ movl (v), %eax
18711+ movl 4(v), %edx
18712+ addl $1, %eax
18713+ adcl $0, %edx
18714 movl %eax, (v)
18715 movl %edx, 4(v)
18716 RET_ENDP
18717@@ -123,6 +232,20 @@ RET_ENDP
18718 BEGIN(dec)
18719 subl $1, (v)
18720 sbbl $0, 4(v)
18721+
18722+#ifdef CONFIG_PAX_REFCOUNT
18723+ jno 0f
18724+ addl $1, (v)
18725+ adcl $0, 4(v)
18726+ int $4
18727+0:
18728+ _ASM_EXTABLE(0b, 0b)
18729+#endif
18730+
18731+RET_ENDP
18732+BEGIN(dec_unchecked)
18733+ subl $1, (v)
18734+ sbbl $0, 4(v)
18735 RET_ENDP
18736 #undef v
18737
18738@@ -132,6 +255,26 @@ BEGIN(dec_return)
18739 movl 4(v), %edx
18740 subl $1, %eax
18741 sbbl $0, %edx
18742+
18743+#ifdef CONFIG_PAX_REFCOUNT
18744+ into
18745+1234:
18746+ _ASM_EXTABLE(1234b, 2f)
18747+#endif
18748+
18749+ movl %eax, (v)
18750+ movl %edx, 4(v)
18751+
18752+#ifdef CONFIG_PAX_REFCOUNT
18753+2:
18754+#endif
18755+
18756+RET_ENDP
18757+BEGIN(dec_return_unchecked)
18758+ movl (v), %eax
18759+ movl 4(v), %edx
18760+ subl $1, %eax
18761+ sbbl $0, %edx
18762 movl %eax, (v)
18763 movl %edx, 4(v)
18764 RET_ENDP
18765@@ -143,6 +286,13 @@ BEGIN(add_unless)
18766 adcl %edx, %edi
18767 addl (v), %eax
18768 adcl 4(v), %edx
18769+
18770+#ifdef CONFIG_PAX_REFCOUNT
18771+ into
18772+1234:
18773+ _ASM_EXTABLE(1234b, 2f)
18774+#endif
18775+
18776 cmpl %eax, %esi
18777 je 3f
18778 1:
18779@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18780 1:
18781 addl $1, %eax
18782 adcl $0, %edx
18783+
18784+#ifdef CONFIG_PAX_REFCOUNT
18785+ into
18786+1234:
18787+ _ASM_EXTABLE(1234b, 2f)
18788+#endif
18789+
18790 movl %eax, (v)
18791 movl %edx, 4(v)
18792 movl $1, %eax
18793@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18794 movl 4(v), %edx
18795 subl $1, %eax
18796 sbbl $0, %edx
18797+
18798+#ifdef CONFIG_PAX_REFCOUNT
18799+ into
18800+1234:
18801+ _ASM_EXTABLE(1234b, 1f)
18802+#endif
18803+
18804 js 1f
18805 movl %eax, (v)
18806 movl %edx, 4(v)
18807diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18808index 391a083..d658e9f 100644
18809--- a/arch/x86/lib/atomic64_cx8_32.S
18810+++ b/arch/x86/lib/atomic64_cx8_32.S
18811@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18812 CFI_STARTPROC
18813
18814 read64 %ecx
18815+ pax_force_retaddr
18816 ret
18817 CFI_ENDPROC
18818 ENDPROC(atomic64_read_cx8)
18819
18820+ENTRY(atomic64_read_unchecked_cx8)
18821+ CFI_STARTPROC
18822+
18823+ read64 %ecx
18824+ pax_force_retaddr
18825+ ret
18826+ CFI_ENDPROC
18827+ENDPROC(atomic64_read_unchecked_cx8)
18828+
18829 ENTRY(atomic64_set_cx8)
18830 CFI_STARTPROC
18831
18832@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18833 cmpxchg8b (%esi)
18834 jne 1b
18835
18836+ pax_force_retaddr
18837 ret
18838 CFI_ENDPROC
18839 ENDPROC(atomic64_set_cx8)
18840
18841+ENTRY(atomic64_set_unchecked_cx8)
18842+ CFI_STARTPROC
18843+
18844+1:
18845+/* we don't need LOCK_PREFIX since aligned 64-bit writes
18846+ * are atomic on 586 and newer */
18847+ cmpxchg8b (%esi)
18848+ jne 1b
18849+
18850+ pax_force_retaddr
18851+ ret
18852+ CFI_ENDPROC
18853+ENDPROC(atomic64_set_unchecked_cx8)
18854+
18855 ENTRY(atomic64_xchg_cx8)
18856 CFI_STARTPROC
18857
18858@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18859 cmpxchg8b (%esi)
18860 jne 1b
18861
18862+ pax_force_retaddr
18863 ret
18864 CFI_ENDPROC
18865 ENDPROC(atomic64_xchg_cx8)
18866
18867-.macro addsub_return func ins insc
18868-ENTRY(atomic64_\func\()_return_cx8)
18869+.macro addsub_return func ins insc unchecked=""
18870+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18871 CFI_STARTPROC
18872 SAVE ebp
18873 SAVE ebx
18874@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18875 movl %edx, %ecx
18876 \ins\()l %esi, %ebx
18877 \insc\()l %edi, %ecx
18878+
18879+.ifb \unchecked
18880+#ifdef CONFIG_PAX_REFCOUNT
18881+ into
18882+2:
18883+ _ASM_EXTABLE(2b, 3f)
18884+#endif
18885+.endif
18886+
18887 LOCK_PREFIX
18888 cmpxchg8b (%ebp)
18889 jne 1b
18890-
18891-10:
18892 movl %ebx, %eax
18893 movl %ecx, %edx
18894+
18895+.ifb \unchecked
18896+#ifdef CONFIG_PAX_REFCOUNT
18897+3:
18898+#endif
18899+.endif
18900+
18901 RESTORE edi
18902 RESTORE esi
18903 RESTORE ebx
18904 RESTORE ebp
18905+ pax_force_retaddr
18906 ret
18907 CFI_ENDPROC
18908-ENDPROC(atomic64_\func\()_return_cx8)
18909+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18910 .endm
18911
18912 addsub_return add add adc
18913 addsub_return sub sub sbb
18914+addsub_return add add adc _unchecked
18915+addsub_return sub sub sbb _unchecked
18916
18917-.macro incdec_return func ins insc
18918-ENTRY(atomic64_\func\()_return_cx8)
18919+.macro incdec_return func ins insc unchecked
18920+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18921 CFI_STARTPROC
18922 SAVE ebx
18923
18924@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18925 movl %edx, %ecx
18926 \ins\()l $1, %ebx
18927 \insc\()l $0, %ecx
18928+
18929+.ifb \unchecked
18930+#ifdef CONFIG_PAX_REFCOUNT
18931+ into
18932+2:
18933+ _ASM_EXTABLE(2b, 3f)
18934+#endif
18935+.endif
18936+
18937 LOCK_PREFIX
18938 cmpxchg8b (%esi)
18939 jne 1b
18940
18941-10:
18942 movl %ebx, %eax
18943 movl %ecx, %edx
18944+
18945+.ifb \unchecked
18946+#ifdef CONFIG_PAX_REFCOUNT
18947+3:
18948+#endif
18949+.endif
18950+
18951 RESTORE ebx
18952+ pax_force_retaddr
18953 ret
18954 CFI_ENDPROC
18955-ENDPROC(atomic64_\func\()_return_cx8)
18956+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18957 .endm
18958
18959 incdec_return inc add adc
18960 incdec_return dec sub sbb
18961+incdec_return inc add adc _unchecked
18962+incdec_return dec sub sbb _unchecked
18963
18964 ENTRY(atomic64_dec_if_positive_cx8)
18965 CFI_STARTPROC
18966@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18967 movl %edx, %ecx
18968 subl $1, %ebx
18969 sbb $0, %ecx
18970+
18971+#ifdef CONFIG_PAX_REFCOUNT
18972+ into
18973+1234:
18974+ _ASM_EXTABLE(1234b, 2f)
18975+#endif
18976+
18977 js 2f
18978 LOCK_PREFIX
18979 cmpxchg8b (%esi)
18980@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18981 movl %ebx, %eax
18982 movl %ecx, %edx
18983 RESTORE ebx
18984+ pax_force_retaddr
18985 ret
18986 CFI_ENDPROC
18987 ENDPROC(atomic64_dec_if_positive_cx8)
18988@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18989 movl %edx, %ecx
18990 addl %esi, %ebx
18991 adcl %edi, %ecx
18992+
18993+#ifdef CONFIG_PAX_REFCOUNT
18994+ into
18995+1234:
18996+ _ASM_EXTABLE(1234b, 3f)
18997+#endif
18998+
18999 LOCK_PREFIX
19000 cmpxchg8b (%ebp)
19001 jne 1b
19002@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
19003 CFI_ADJUST_CFA_OFFSET -8
19004 RESTORE ebx
19005 RESTORE ebp
19006+ pax_force_retaddr
19007 ret
19008 4:
19009 cmpl %edx, 4(%esp)
19010@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
19011 movl %edx, %ecx
19012 addl $1, %ebx
19013 adcl $0, %ecx
19014+
19015+#ifdef CONFIG_PAX_REFCOUNT
19016+ into
19017+1234:
19018+ _ASM_EXTABLE(1234b, 3f)
19019+#endif
19020+
19021 LOCK_PREFIX
19022 cmpxchg8b (%esi)
19023 jne 1b
19024@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
19025 movl $1, %eax
19026 3:
19027 RESTORE ebx
19028+ pax_force_retaddr
19029 ret
19030 4:
19031 testl %edx, %edx
19032diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19033index 78d16a5..fbcf666 100644
19034--- a/arch/x86/lib/checksum_32.S
19035+++ b/arch/x86/lib/checksum_32.S
19036@@ -28,7 +28,8 @@
19037 #include <linux/linkage.h>
19038 #include <asm/dwarf2.h>
19039 #include <asm/errno.h>
19040-
19041+#include <asm/segment.h>
19042+
19043 /*
19044 * computes a partial checksum, e.g. for TCP/UDP fragments
19045 */
19046@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19047
19048 #define ARGBASE 16
19049 #define FP 12
19050-
19051-ENTRY(csum_partial_copy_generic)
19052+
19053+ENTRY(csum_partial_copy_generic_to_user)
19054 CFI_STARTPROC
19055+
19056+#ifdef CONFIG_PAX_MEMORY_UDEREF
19057+ pushl_cfi %gs
19058+ popl_cfi %es
19059+ jmp csum_partial_copy_generic
19060+#endif
19061+
19062+ENTRY(csum_partial_copy_generic_from_user)
19063+
19064+#ifdef CONFIG_PAX_MEMORY_UDEREF
19065+ pushl_cfi %gs
19066+ popl_cfi %ds
19067+#endif
19068+
19069+ENTRY(csum_partial_copy_generic)
19070 subl $4,%esp
19071 CFI_ADJUST_CFA_OFFSET 4
19072 pushl_cfi %edi
19073@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19074 jmp 4f
19075 SRC(1: movw (%esi), %bx )
19076 addl $2, %esi
19077-DST( movw %bx, (%edi) )
19078+DST( movw %bx, %es:(%edi) )
19079 addl $2, %edi
19080 addw %bx, %ax
19081 adcl $0, %eax
19082@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19083 SRC(1: movl (%esi), %ebx )
19084 SRC( movl 4(%esi), %edx )
19085 adcl %ebx, %eax
19086-DST( movl %ebx, (%edi) )
19087+DST( movl %ebx, %es:(%edi) )
19088 adcl %edx, %eax
19089-DST( movl %edx, 4(%edi) )
19090+DST( movl %edx, %es:4(%edi) )
19091
19092 SRC( movl 8(%esi), %ebx )
19093 SRC( movl 12(%esi), %edx )
19094 adcl %ebx, %eax
19095-DST( movl %ebx, 8(%edi) )
19096+DST( movl %ebx, %es:8(%edi) )
19097 adcl %edx, %eax
19098-DST( movl %edx, 12(%edi) )
19099+DST( movl %edx, %es:12(%edi) )
19100
19101 SRC( movl 16(%esi), %ebx )
19102 SRC( movl 20(%esi), %edx )
19103 adcl %ebx, %eax
19104-DST( movl %ebx, 16(%edi) )
19105+DST( movl %ebx, %es:16(%edi) )
19106 adcl %edx, %eax
19107-DST( movl %edx, 20(%edi) )
19108+DST( movl %edx, %es:20(%edi) )
19109
19110 SRC( movl 24(%esi), %ebx )
19111 SRC( movl 28(%esi), %edx )
19112 adcl %ebx, %eax
19113-DST( movl %ebx, 24(%edi) )
19114+DST( movl %ebx, %es:24(%edi) )
19115 adcl %edx, %eax
19116-DST( movl %edx, 28(%edi) )
19117+DST( movl %edx, %es:28(%edi) )
19118
19119 lea 32(%esi), %esi
19120 lea 32(%edi), %edi
19121@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19122 shrl $2, %edx # This clears CF
19123 SRC(3: movl (%esi), %ebx )
19124 adcl %ebx, %eax
19125-DST( movl %ebx, (%edi) )
19126+DST( movl %ebx, %es:(%edi) )
19127 lea 4(%esi), %esi
19128 lea 4(%edi), %edi
19129 dec %edx
19130@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19131 jb 5f
19132 SRC( movw (%esi), %cx )
19133 leal 2(%esi), %esi
19134-DST( movw %cx, (%edi) )
19135+DST( movw %cx, %es:(%edi) )
19136 leal 2(%edi), %edi
19137 je 6f
19138 shll $16,%ecx
19139 SRC(5: movb (%esi), %cl )
19140-DST( movb %cl, (%edi) )
19141+DST( movb %cl, %es:(%edi) )
19142 6: addl %ecx, %eax
19143 adcl $0, %eax
19144 7:
19145@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19146
19147 6001:
19148 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19149- movl $-EFAULT, (%ebx)
19150+ movl $-EFAULT, %ss:(%ebx)
19151
19152 # zero the complete destination - computing the rest
19153 # is too much work
19154@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19155
19156 6002:
19157 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19158- movl $-EFAULT,(%ebx)
19159+ movl $-EFAULT,%ss:(%ebx)
19160 jmp 5000b
19161
19162 .previous
19163
19164+ pushl_cfi %ss
19165+ popl_cfi %ds
19166+ pushl_cfi %ss
19167+ popl_cfi %es
19168 popl_cfi %ebx
19169 CFI_RESTORE ebx
19170 popl_cfi %esi
19171@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19172 popl_cfi %ecx # equivalent to addl $4,%esp
19173 ret
19174 CFI_ENDPROC
19175-ENDPROC(csum_partial_copy_generic)
19176+ENDPROC(csum_partial_copy_generic_to_user)
19177
19178 #else
19179
19180 /* Version for PentiumII/PPro */
19181
19182 #define ROUND1(x) \
19183+ nop; nop; nop; \
19184 SRC(movl x(%esi), %ebx ) ; \
19185 addl %ebx, %eax ; \
19186- DST(movl %ebx, x(%edi) ) ;
19187+ DST(movl %ebx, %es:x(%edi)) ;
19188
19189 #define ROUND(x) \
19190+ nop; nop; nop; \
19191 SRC(movl x(%esi), %ebx ) ; \
19192 adcl %ebx, %eax ; \
19193- DST(movl %ebx, x(%edi) ) ;
19194+ DST(movl %ebx, %es:x(%edi)) ;
19195
19196 #define ARGBASE 12
19197-
19198-ENTRY(csum_partial_copy_generic)
19199+
19200+ENTRY(csum_partial_copy_generic_to_user)
19201 CFI_STARTPROC
19202+
19203+#ifdef CONFIG_PAX_MEMORY_UDEREF
19204+ pushl_cfi %gs
19205+ popl_cfi %es
19206+ jmp csum_partial_copy_generic
19207+#endif
19208+
19209+ENTRY(csum_partial_copy_generic_from_user)
19210+
19211+#ifdef CONFIG_PAX_MEMORY_UDEREF
19212+ pushl_cfi %gs
19213+ popl_cfi %ds
19214+#endif
19215+
19216+ENTRY(csum_partial_copy_generic)
19217 pushl_cfi %ebx
19218 CFI_REL_OFFSET ebx, 0
19219 pushl_cfi %edi
19220@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19221 subl %ebx, %edi
19222 lea -1(%esi),%edx
19223 andl $-32,%edx
19224- lea 3f(%ebx,%ebx), %ebx
19225+ lea 3f(%ebx,%ebx,2), %ebx
19226 testl %esi, %esi
19227 jmp *%ebx
19228 1: addl $64,%esi
19229@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19230 jb 5f
19231 SRC( movw (%esi), %dx )
19232 leal 2(%esi), %esi
19233-DST( movw %dx, (%edi) )
19234+DST( movw %dx, %es:(%edi) )
19235 leal 2(%edi), %edi
19236 je 6f
19237 shll $16,%edx
19238 5:
19239 SRC( movb (%esi), %dl )
19240-DST( movb %dl, (%edi) )
19241+DST( movb %dl, %es:(%edi) )
19242 6: addl %edx, %eax
19243 adcl $0, %eax
19244 7:
19245 .section .fixup, "ax"
19246 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19247- movl $-EFAULT, (%ebx)
19248+ movl $-EFAULT, %ss:(%ebx)
19249 # zero the complete destination (computing the rest is too much work)
19250 movl ARGBASE+8(%esp),%edi # dst
19251 movl ARGBASE+12(%esp),%ecx # len
19252@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19253 rep; stosb
19254 jmp 7b
19255 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19256- movl $-EFAULT, (%ebx)
19257+ movl $-EFAULT, %ss:(%ebx)
19258 jmp 7b
19259 .previous
19260
19261+#ifdef CONFIG_PAX_MEMORY_UDEREF
19262+ pushl_cfi %ss
19263+ popl_cfi %ds
19264+ pushl_cfi %ss
19265+ popl_cfi %es
19266+#endif
19267+
19268 popl_cfi %esi
19269 CFI_RESTORE esi
19270 popl_cfi %edi
19271@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19272 CFI_RESTORE ebx
19273 ret
19274 CFI_ENDPROC
19275-ENDPROC(csum_partial_copy_generic)
19276+ENDPROC(csum_partial_copy_generic_to_user)
19277
19278 #undef ROUND
19279 #undef ROUND1
19280diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19281index f2145cf..cea889d 100644
19282--- a/arch/x86/lib/clear_page_64.S
19283+++ b/arch/x86/lib/clear_page_64.S
19284@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19285 movl $4096/8,%ecx
19286 xorl %eax,%eax
19287 rep stosq
19288+ pax_force_retaddr
19289 ret
19290 CFI_ENDPROC
19291 ENDPROC(clear_page_c)
19292@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19293 movl $4096,%ecx
19294 xorl %eax,%eax
19295 rep stosb
19296+ pax_force_retaddr
19297 ret
19298 CFI_ENDPROC
19299 ENDPROC(clear_page_c_e)
19300@@ -43,6 +45,7 @@ ENTRY(clear_page)
19301 leaq 64(%rdi),%rdi
19302 jnz .Lloop
19303 nop
19304+ pax_force_retaddr
19305 ret
19306 CFI_ENDPROC
19307 .Lclear_page_end:
19308@@ -58,7 +61,7 @@ ENDPROC(clear_page)
19309
19310 #include <asm/cpufeature.h>
19311
19312- .section .altinstr_replacement,"ax"
19313+ .section .altinstr_replacement,"a"
19314 1: .byte 0xeb /* jmp <disp8> */
19315 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19316 2: .byte 0xeb /* jmp <disp8> */
19317diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19318index 1e572c5..2a162cd 100644
19319--- a/arch/x86/lib/cmpxchg16b_emu.S
19320+++ b/arch/x86/lib/cmpxchg16b_emu.S
19321@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19322
19323 popf
19324 mov $1, %al
19325+ pax_force_retaddr
19326 ret
19327
19328 not_same:
19329 popf
19330 xor %al,%al
19331+ pax_force_retaddr
19332 ret
19333
19334 CFI_ENDPROC
19335diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19336index 01c805b..dccb07f 100644
19337--- a/arch/x86/lib/copy_page_64.S
19338+++ b/arch/x86/lib/copy_page_64.S
19339@@ -9,6 +9,7 @@ copy_page_c:
19340 CFI_STARTPROC
19341 movl $4096/8,%ecx
19342 rep movsq
19343+ pax_force_retaddr
19344 ret
19345 CFI_ENDPROC
19346 ENDPROC(copy_page_c)
19347@@ -39,7 +40,7 @@ ENTRY(copy_page)
19348 movq 16 (%rsi), %rdx
19349 movq 24 (%rsi), %r8
19350 movq 32 (%rsi), %r9
19351- movq 40 (%rsi), %r10
19352+ movq 40 (%rsi), %r13
19353 movq 48 (%rsi), %r11
19354 movq 56 (%rsi), %r12
19355
19356@@ -50,7 +51,7 @@ ENTRY(copy_page)
19357 movq %rdx, 16 (%rdi)
19358 movq %r8, 24 (%rdi)
19359 movq %r9, 32 (%rdi)
19360- movq %r10, 40 (%rdi)
19361+ movq %r13, 40 (%rdi)
19362 movq %r11, 48 (%rdi)
19363 movq %r12, 56 (%rdi)
19364
19365@@ -69,7 +70,7 @@ ENTRY(copy_page)
19366 movq 16 (%rsi), %rdx
19367 movq 24 (%rsi), %r8
19368 movq 32 (%rsi), %r9
19369- movq 40 (%rsi), %r10
19370+ movq 40 (%rsi), %r13
19371 movq 48 (%rsi), %r11
19372 movq 56 (%rsi), %r12
19373
19374@@ -78,7 +79,7 @@ ENTRY(copy_page)
19375 movq %rdx, 16 (%rdi)
19376 movq %r8, 24 (%rdi)
19377 movq %r9, 32 (%rdi)
19378- movq %r10, 40 (%rdi)
19379+ movq %r13, 40 (%rdi)
19380 movq %r11, 48 (%rdi)
19381 movq %r12, 56 (%rdi)
19382
19383@@ -95,6 +96,7 @@ ENTRY(copy_page)
19384 CFI_RESTORE r13
19385 addq $3*8,%rsp
19386 CFI_ADJUST_CFA_OFFSET -3*8
19387+ pax_force_retaddr
19388 ret
19389 .Lcopy_page_end:
19390 CFI_ENDPROC
19391@@ -105,7 +107,7 @@ ENDPROC(copy_page)
19392
19393 #include <asm/cpufeature.h>
19394
19395- .section .altinstr_replacement,"ax"
19396+ .section .altinstr_replacement,"a"
19397 1: .byte 0xeb /* jmp <disp8> */
19398 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19399 2:
19400diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19401index 0248402..821c786 100644
19402--- a/arch/x86/lib/copy_user_64.S
19403+++ b/arch/x86/lib/copy_user_64.S
19404@@ -16,6 +16,7 @@
19405 #include <asm/thread_info.h>
19406 #include <asm/cpufeature.h>
19407 #include <asm/alternative-asm.h>
19408+#include <asm/pgtable.h>
19409
19410 /*
19411 * By placing feature2 after feature1 in altinstructions section, we logically
19412@@ -29,7 +30,7 @@
19413 .byte 0xe9 /* 32bit jump */
19414 .long \orig-1f /* by default jump to orig */
19415 1:
19416- .section .altinstr_replacement,"ax"
19417+ .section .altinstr_replacement,"a"
19418 2: .byte 0xe9 /* near jump with 32bit immediate */
19419 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19420 3: .byte 0xe9 /* near jump with 32bit immediate */
19421@@ -71,47 +72,20 @@
19422 #endif
19423 .endm
19424
19425-/* Standard copy_to_user with segment limit checking */
19426-ENTRY(_copy_to_user)
19427- CFI_STARTPROC
19428- GET_THREAD_INFO(%rax)
19429- movq %rdi,%rcx
19430- addq %rdx,%rcx
19431- jc bad_to_user
19432- cmpq TI_addr_limit(%rax),%rcx
19433- ja bad_to_user
19434- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19435- copy_user_generic_unrolled,copy_user_generic_string, \
19436- copy_user_enhanced_fast_string
19437- CFI_ENDPROC
19438-ENDPROC(_copy_to_user)
19439-
19440-/* Standard copy_from_user with segment limit checking */
19441-ENTRY(_copy_from_user)
19442- CFI_STARTPROC
19443- GET_THREAD_INFO(%rax)
19444- movq %rsi,%rcx
19445- addq %rdx,%rcx
19446- jc bad_from_user
19447- cmpq TI_addr_limit(%rax),%rcx
19448- ja bad_from_user
19449- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19450- copy_user_generic_unrolled,copy_user_generic_string, \
19451- copy_user_enhanced_fast_string
19452- CFI_ENDPROC
19453-ENDPROC(_copy_from_user)
19454-
19455 .section .fixup,"ax"
19456 /* must zero dest */
19457 ENTRY(bad_from_user)
19458 bad_from_user:
19459 CFI_STARTPROC
19460+ testl %edx,%edx
19461+ js bad_to_user
19462 movl %edx,%ecx
19463 xorl %eax,%eax
19464 rep
19465 stosb
19466 bad_to_user:
19467 movl %edx,%eax
19468+ pax_force_retaddr
19469 ret
19470 CFI_ENDPROC
19471 ENDPROC(bad_from_user)
19472@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19473 jz 17f
19474 1: movq (%rsi),%r8
19475 2: movq 1*8(%rsi),%r9
19476-3: movq 2*8(%rsi),%r10
19477+3: movq 2*8(%rsi),%rax
19478 4: movq 3*8(%rsi),%r11
19479 5: movq %r8,(%rdi)
19480 6: movq %r9,1*8(%rdi)
19481-7: movq %r10,2*8(%rdi)
19482+7: movq %rax,2*8(%rdi)
19483 8: movq %r11,3*8(%rdi)
19484 9: movq 4*8(%rsi),%r8
19485 10: movq 5*8(%rsi),%r9
19486-11: movq 6*8(%rsi),%r10
19487+11: movq 6*8(%rsi),%rax
19488 12: movq 7*8(%rsi),%r11
19489 13: movq %r8,4*8(%rdi)
19490 14: movq %r9,5*8(%rdi)
19491-15: movq %r10,6*8(%rdi)
19492+15: movq %rax,6*8(%rdi)
19493 16: movq %r11,7*8(%rdi)
19494 leaq 64(%rsi),%rsi
19495 leaq 64(%rdi),%rdi
19496@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19497 decl %ecx
19498 jnz 21b
19499 23: xor %eax,%eax
19500+ pax_force_retaddr
19501 ret
19502
19503 .section .fixup,"ax"
19504@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19505 3: rep
19506 movsb
19507 4: xorl %eax,%eax
19508+ pax_force_retaddr
19509 ret
19510
19511 .section .fixup,"ax"
19512@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19513 1: rep
19514 movsb
19515 2: xorl %eax,%eax
19516+ pax_force_retaddr
19517 ret
19518
19519 .section .fixup,"ax"
19520diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19521index cb0c112..e3a6895 100644
19522--- a/arch/x86/lib/copy_user_nocache_64.S
19523+++ b/arch/x86/lib/copy_user_nocache_64.S
19524@@ -8,12 +8,14 @@
19525
19526 #include <linux/linkage.h>
19527 #include <asm/dwarf2.h>
19528+#include <asm/alternative-asm.h>
19529
19530 #define FIX_ALIGNMENT 1
19531
19532 #include <asm/current.h>
19533 #include <asm/asm-offsets.h>
19534 #include <asm/thread_info.h>
19535+#include <asm/pgtable.h>
19536
19537 .macro ALIGN_DESTINATION
19538 #ifdef FIX_ALIGNMENT
19539@@ -50,6 +52,15 @@
19540 */
19541 ENTRY(__copy_user_nocache)
19542 CFI_STARTPROC
19543+
19544+#ifdef CONFIG_PAX_MEMORY_UDEREF
19545+ mov $PAX_USER_SHADOW_BASE,%rcx
19546+ cmp %rcx,%rsi
19547+ jae 1f
19548+ add %rcx,%rsi
19549+1:
19550+#endif
19551+
19552 cmpl $8,%edx
19553 jb 20f /* less then 8 bytes, go to byte copy loop */
19554 ALIGN_DESTINATION
19555@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19556 jz 17f
19557 1: movq (%rsi),%r8
19558 2: movq 1*8(%rsi),%r9
19559-3: movq 2*8(%rsi),%r10
19560+3: movq 2*8(%rsi),%rax
19561 4: movq 3*8(%rsi),%r11
19562 5: movnti %r8,(%rdi)
19563 6: movnti %r9,1*8(%rdi)
19564-7: movnti %r10,2*8(%rdi)
19565+7: movnti %rax,2*8(%rdi)
19566 8: movnti %r11,3*8(%rdi)
19567 9: movq 4*8(%rsi),%r8
19568 10: movq 5*8(%rsi),%r9
19569-11: movq 6*8(%rsi),%r10
19570+11: movq 6*8(%rsi),%rax
19571 12: movq 7*8(%rsi),%r11
19572 13: movnti %r8,4*8(%rdi)
19573 14: movnti %r9,5*8(%rdi)
19574-15: movnti %r10,6*8(%rdi)
19575+15: movnti %rax,6*8(%rdi)
19576 16: movnti %r11,7*8(%rdi)
19577 leaq 64(%rsi),%rsi
19578 leaq 64(%rdi),%rdi
19579@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19580 jnz 21b
19581 23: xorl %eax,%eax
19582 sfence
19583+ pax_force_retaddr
19584 ret
19585
19586 .section .fixup,"ax"
19587diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19588index fb903b7..c92b7f7 100644
19589--- a/arch/x86/lib/csum-copy_64.S
19590+++ b/arch/x86/lib/csum-copy_64.S
19591@@ -8,6 +8,7 @@
19592 #include <linux/linkage.h>
19593 #include <asm/dwarf2.h>
19594 #include <asm/errno.h>
19595+#include <asm/alternative-asm.h>
19596
19597 /*
19598 * Checksum copy with exception handling.
19599@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19600 CFI_RESTORE rbp
19601 addq $7*8, %rsp
19602 CFI_ADJUST_CFA_OFFSET -7*8
19603+ pax_force_retaddr 0, 1
19604 ret
19605 CFI_RESTORE_STATE
19606
19607diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19608index 459b58a..9570bc7 100644
19609--- a/arch/x86/lib/csum-wrappers_64.c
19610+++ b/arch/x86/lib/csum-wrappers_64.c
19611@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19612 len -= 2;
19613 }
19614 }
19615- isum = csum_partial_copy_generic((__force const void *)src,
19616+
19617+#ifdef CONFIG_PAX_MEMORY_UDEREF
19618+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19619+ src += PAX_USER_SHADOW_BASE;
19620+#endif
19621+
19622+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
19623 dst, len, isum, errp, NULL);
19624 if (unlikely(*errp))
19625 goto out_err;
19626@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19627 }
19628
19629 *errp = 0;
19630- return csum_partial_copy_generic(src, (void __force *)dst,
19631+
19632+#ifdef CONFIG_PAX_MEMORY_UDEREF
19633+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19634+ dst += PAX_USER_SHADOW_BASE;
19635+#endif
19636+
19637+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19638 len, isum, NULL, errp);
19639 }
19640 EXPORT_SYMBOL(csum_partial_copy_to_user);
19641diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19642index 51f1504..ddac4c1 100644
19643--- a/arch/x86/lib/getuser.S
19644+++ b/arch/x86/lib/getuser.S
19645@@ -33,15 +33,38 @@
19646 #include <asm/asm-offsets.h>
19647 #include <asm/thread_info.h>
19648 #include <asm/asm.h>
19649+#include <asm/segment.h>
19650+#include <asm/pgtable.h>
19651+#include <asm/alternative-asm.h>
19652+
19653+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19654+#define __copyuser_seg gs;
19655+#else
19656+#define __copyuser_seg
19657+#endif
19658
19659 .text
19660 ENTRY(__get_user_1)
19661 CFI_STARTPROC
19662+
19663+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19664 GET_THREAD_INFO(%_ASM_DX)
19665 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19666 jae bad_get_user
19667-1: movzb (%_ASM_AX),%edx
19668+
19669+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19670+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19671+ cmp %_ASM_DX,%_ASM_AX
19672+ jae 1234f
19673+ add %_ASM_DX,%_ASM_AX
19674+1234:
19675+#endif
19676+
19677+#endif
19678+
19679+1: __copyuser_seg movzb (%_ASM_AX),%edx
19680 xor %eax,%eax
19681+ pax_force_retaddr
19682 ret
19683 CFI_ENDPROC
19684 ENDPROC(__get_user_1)
19685@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19686 ENTRY(__get_user_2)
19687 CFI_STARTPROC
19688 add $1,%_ASM_AX
19689+
19690+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19691 jc bad_get_user
19692 GET_THREAD_INFO(%_ASM_DX)
19693 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19694 jae bad_get_user
19695-2: movzwl -1(%_ASM_AX),%edx
19696+
19697+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19698+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19699+ cmp %_ASM_DX,%_ASM_AX
19700+ jae 1234f
19701+ add %_ASM_DX,%_ASM_AX
19702+1234:
19703+#endif
19704+
19705+#endif
19706+
19707+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19708 xor %eax,%eax
19709+ pax_force_retaddr
19710 ret
19711 CFI_ENDPROC
19712 ENDPROC(__get_user_2)
19713@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19714 ENTRY(__get_user_4)
19715 CFI_STARTPROC
19716 add $3,%_ASM_AX
19717+
19718+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19719 jc bad_get_user
19720 GET_THREAD_INFO(%_ASM_DX)
19721 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19722 jae bad_get_user
19723-3: mov -3(%_ASM_AX),%edx
19724+
19725+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19726+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19727+ cmp %_ASM_DX,%_ASM_AX
19728+ jae 1234f
19729+ add %_ASM_DX,%_ASM_AX
19730+1234:
19731+#endif
19732+
19733+#endif
19734+
19735+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19736 xor %eax,%eax
19737+ pax_force_retaddr
19738 ret
19739 CFI_ENDPROC
19740 ENDPROC(__get_user_4)
19741@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19742 GET_THREAD_INFO(%_ASM_DX)
19743 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19744 jae bad_get_user
19745+
19746+#ifdef CONFIG_PAX_MEMORY_UDEREF
19747+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19748+ cmp %_ASM_DX,%_ASM_AX
19749+ jae 1234f
19750+ add %_ASM_DX,%_ASM_AX
19751+1234:
19752+#endif
19753+
19754 4: movq -7(%_ASM_AX),%_ASM_DX
19755 xor %eax,%eax
19756+ pax_force_retaddr
19757 ret
19758 CFI_ENDPROC
19759 ENDPROC(__get_user_8)
19760@@ -91,6 +152,7 @@ bad_get_user:
19761 CFI_STARTPROC
19762 xor %edx,%edx
19763 mov $(-EFAULT),%_ASM_AX
19764+ pax_force_retaddr
19765 ret
19766 CFI_ENDPROC
19767 END(bad_get_user)
19768diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19769index 374562e..a75830b 100644
19770--- a/arch/x86/lib/insn.c
19771+++ b/arch/x86/lib/insn.c
19772@@ -21,6 +21,11 @@
19773 #include <linux/string.h>
19774 #include <asm/inat.h>
19775 #include <asm/insn.h>
19776+#ifdef __KERNEL__
19777+#include <asm/pgtable_types.h>
19778+#else
19779+#define ktla_ktva(addr) addr
19780+#endif
19781
19782 /* Verify next sizeof(t) bytes can be on the same instruction */
19783 #define validate_next(t, insn, n) \
19784@@ -49,8 +54,8 @@
19785 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19786 {
19787 memset(insn, 0, sizeof(*insn));
19788- insn->kaddr = kaddr;
19789- insn->next_byte = kaddr;
19790+ insn->kaddr = ktla_ktva(kaddr);
19791+ insn->next_byte = ktla_ktva(kaddr);
19792 insn->x86_64 = x86_64 ? 1 : 0;
19793 insn->opnd_bytes = 4;
19794 if (x86_64)
19795diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19796index 05a95e7..326f2fa 100644
19797--- a/arch/x86/lib/iomap_copy_64.S
19798+++ b/arch/x86/lib/iomap_copy_64.S
19799@@ -17,6 +17,7 @@
19800
19801 #include <linux/linkage.h>
19802 #include <asm/dwarf2.h>
19803+#include <asm/alternative-asm.h>
19804
19805 /*
19806 * override generic version in lib/iomap_copy.c
19807@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19808 CFI_STARTPROC
19809 movl %edx,%ecx
19810 rep movsd
19811+ pax_force_retaddr
19812 ret
19813 CFI_ENDPROC
19814 ENDPROC(__iowrite32_copy)
19815diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19816index efbf2a0..8893637 100644
19817--- a/arch/x86/lib/memcpy_64.S
19818+++ b/arch/x86/lib/memcpy_64.S
19819@@ -34,6 +34,7 @@
19820 rep movsq
19821 movl %edx, %ecx
19822 rep movsb
19823+ pax_force_retaddr
19824 ret
19825 .Lmemcpy_e:
19826 .previous
19827@@ -51,6 +52,7 @@
19828
19829 movl %edx, %ecx
19830 rep movsb
19831+ pax_force_retaddr
19832 ret
19833 .Lmemcpy_e_e:
19834 .previous
19835@@ -81,13 +83,13 @@ ENTRY(memcpy)
19836 */
19837 movq 0*8(%rsi), %r8
19838 movq 1*8(%rsi), %r9
19839- movq 2*8(%rsi), %r10
19840+ movq 2*8(%rsi), %rcx
19841 movq 3*8(%rsi), %r11
19842 leaq 4*8(%rsi), %rsi
19843
19844 movq %r8, 0*8(%rdi)
19845 movq %r9, 1*8(%rdi)
19846- movq %r10, 2*8(%rdi)
19847+ movq %rcx, 2*8(%rdi)
19848 movq %r11, 3*8(%rdi)
19849 leaq 4*8(%rdi), %rdi
19850 jae .Lcopy_forward_loop
19851@@ -110,12 +112,12 @@ ENTRY(memcpy)
19852 subq $0x20, %rdx
19853 movq -1*8(%rsi), %r8
19854 movq -2*8(%rsi), %r9
19855- movq -3*8(%rsi), %r10
19856+ movq -3*8(%rsi), %rcx
19857 movq -4*8(%rsi), %r11
19858 leaq -4*8(%rsi), %rsi
19859 movq %r8, -1*8(%rdi)
19860 movq %r9, -2*8(%rdi)
19861- movq %r10, -3*8(%rdi)
19862+ movq %rcx, -3*8(%rdi)
19863 movq %r11, -4*8(%rdi)
19864 leaq -4*8(%rdi), %rdi
19865 jae .Lcopy_backward_loop
19866@@ -135,12 +137,13 @@ ENTRY(memcpy)
19867 */
19868 movq 0*8(%rsi), %r8
19869 movq 1*8(%rsi), %r9
19870- movq -2*8(%rsi, %rdx), %r10
19871+ movq -2*8(%rsi, %rdx), %rcx
19872 movq -1*8(%rsi, %rdx), %r11
19873 movq %r8, 0*8(%rdi)
19874 movq %r9, 1*8(%rdi)
19875- movq %r10, -2*8(%rdi, %rdx)
19876+ movq %rcx, -2*8(%rdi, %rdx)
19877 movq %r11, -1*8(%rdi, %rdx)
19878+ pax_force_retaddr
19879 retq
19880 .p2align 4
19881 .Lless_16bytes:
19882@@ -153,6 +156,7 @@ ENTRY(memcpy)
19883 movq -1*8(%rsi, %rdx), %r9
19884 movq %r8, 0*8(%rdi)
19885 movq %r9, -1*8(%rdi, %rdx)
19886+ pax_force_retaddr
19887 retq
19888 .p2align 4
19889 .Lless_8bytes:
19890@@ -166,6 +170,7 @@ ENTRY(memcpy)
19891 movl -4(%rsi, %rdx), %r8d
19892 movl %ecx, (%rdi)
19893 movl %r8d, -4(%rdi, %rdx)
19894+ pax_force_retaddr
19895 retq
19896 .p2align 4
19897 .Lless_3bytes:
19898@@ -183,6 +188,7 @@ ENTRY(memcpy)
19899 jnz .Lloop_1
19900
19901 .Lend:
19902+ pax_force_retaddr
19903 retq
19904 CFI_ENDPROC
19905 ENDPROC(memcpy)
19906diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19907index ee16461..c39c199 100644
19908--- a/arch/x86/lib/memmove_64.S
19909+++ b/arch/x86/lib/memmove_64.S
19910@@ -61,13 +61,13 @@ ENTRY(memmove)
19911 5:
19912 sub $0x20, %rdx
19913 movq 0*8(%rsi), %r11
19914- movq 1*8(%rsi), %r10
19915+ movq 1*8(%rsi), %rcx
19916 movq 2*8(%rsi), %r9
19917 movq 3*8(%rsi), %r8
19918 leaq 4*8(%rsi), %rsi
19919
19920 movq %r11, 0*8(%rdi)
19921- movq %r10, 1*8(%rdi)
19922+ movq %rcx, 1*8(%rdi)
19923 movq %r9, 2*8(%rdi)
19924 movq %r8, 3*8(%rdi)
19925 leaq 4*8(%rdi), %rdi
19926@@ -81,10 +81,10 @@ ENTRY(memmove)
19927 4:
19928 movq %rdx, %rcx
19929 movq -8(%rsi, %rdx), %r11
19930- lea -8(%rdi, %rdx), %r10
19931+ lea -8(%rdi, %rdx), %r9
19932 shrq $3, %rcx
19933 rep movsq
19934- movq %r11, (%r10)
19935+ movq %r11, (%r9)
19936 jmp 13f
19937 .Lmemmove_end_forward:
19938
19939@@ -95,14 +95,14 @@ ENTRY(memmove)
19940 7:
19941 movq %rdx, %rcx
19942 movq (%rsi), %r11
19943- movq %rdi, %r10
19944+ movq %rdi, %r9
19945 leaq -8(%rsi, %rdx), %rsi
19946 leaq -8(%rdi, %rdx), %rdi
19947 shrq $3, %rcx
19948 std
19949 rep movsq
19950 cld
19951- movq %r11, (%r10)
19952+ movq %r11, (%r9)
19953 jmp 13f
19954
19955 /*
19956@@ -127,13 +127,13 @@ ENTRY(memmove)
19957 8:
19958 subq $0x20, %rdx
19959 movq -1*8(%rsi), %r11
19960- movq -2*8(%rsi), %r10
19961+ movq -2*8(%rsi), %rcx
19962 movq -3*8(%rsi), %r9
19963 movq -4*8(%rsi), %r8
19964 leaq -4*8(%rsi), %rsi
19965
19966 movq %r11, -1*8(%rdi)
19967- movq %r10, -2*8(%rdi)
19968+ movq %rcx, -2*8(%rdi)
19969 movq %r9, -3*8(%rdi)
19970 movq %r8, -4*8(%rdi)
19971 leaq -4*8(%rdi), %rdi
19972@@ -151,11 +151,11 @@ ENTRY(memmove)
19973 * Move data from 16 bytes to 31 bytes.
19974 */
19975 movq 0*8(%rsi), %r11
19976- movq 1*8(%rsi), %r10
19977+ movq 1*8(%rsi), %rcx
19978 movq -2*8(%rsi, %rdx), %r9
19979 movq -1*8(%rsi, %rdx), %r8
19980 movq %r11, 0*8(%rdi)
19981- movq %r10, 1*8(%rdi)
19982+ movq %rcx, 1*8(%rdi)
19983 movq %r9, -2*8(%rdi, %rdx)
19984 movq %r8, -1*8(%rdi, %rdx)
19985 jmp 13f
19986@@ -167,9 +167,9 @@ ENTRY(memmove)
19987 * Move data from 8 bytes to 15 bytes.
19988 */
19989 movq 0*8(%rsi), %r11
19990- movq -1*8(%rsi, %rdx), %r10
19991+ movq -1*8(%rsi, %rdx), %r9
19992 movq %r11, 0*8(%rdi)
19993- movq %r10, -1*8(%rdi, %rdx)
19994+ movq %r9, -1*8(%rdi, %rdx)
19995 jmp 13f
19996 10:
19997 cmpq $4, %rdx
19998@@ -178,9 +178,9 @@ ENTRY(memmove)
19999 * Move data from 4 bytes to 7 bytes.
20000 */
20001 movl (%rsi), %r11d
20002- movl -4(%rsi, %rdx), %r10d
20003+ movl -4(%rsi, %rdx), %r9d
20004 movl %r11d, (%rdi)
20005- movl %r10d, -4(%rdi, %rdx)
20006+ movl %r9d, -4(%rdi, %rdx)
20007 jmp 13f
20008 11:
20009 cmp $2, %rdx
20010@@ -189,9 +189,9 @@ ENTRY(memmove)
20011 * Move data from 2 bytes to 3 bytes.
20012 */
20013 movw (%rsi), %r11w
20014- movw -2(%rsi, %rdx), %r10w
20015+ movw -2(%rsi, %rdx), %r9w
20016 movw %r11w, (%rdi)
20017- movw %r10w, -2(%rdi, %rdx)
20018+ movw %r9w, -2(%rdi, %rdx)
20019 jmp 13f
20020 12:
20021 cmp $1, %rdx
20022@@ -202,6 +202,7 @@ ENTRY(memmove)
20023 movb (%rsi), %r11b
20024 movb %r11b, (%rdi)
20025 13:
20026+ pax_force_retaddr
20027 retq
20028 CFI_ENDPROC
20029
20030@@ -210,6 +211,7 @@ ENTRY(memmove)
20031 /* Forward moving data. */
20032 movq %rdx, %rcx
20033 rep movsb
20034+ pax_force_retaddr
20035 retq
20036 .Lmemmove_end_forward_efs:
20037 .previous
20038diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20039index 79bd454..dff325a 100644
20040--- a/arch/x86/lib/memset_64.S
20041+++ b/arch/x86/lib/memset_64.S
20042@@ -31,6 +31,7 @@
20043 movl %r8d,%ecx
20044 rep stosb
20045 movq %r9,%rax
20046+ pax_force_retaddr
20047 ret
20048 .Lmemset_e:
20049 .previous
20050@@ -53,6 +54,7 @@
20051 movl %edx,%ecx
20052 rep stosb
20053 movq %r9,%rax
20054+ pax_force_retaddr
20055 ret
20056 .Lmemset_e_e:
20057 .previous
20058@@ -60,13 +62,13 @@
20059 ENTRY(memset)
20060 ENTRY(__memset)
20061 CFI_STARTPROC
20062- movq %rdi,%r10
20063 movq %rdx,%r11
20064
20065 /* expand byte value */
20066 movzbl %sil,%ecx
20067 movabs $0x0101010101010101,%rax
20068 mul %rcx /* with rax, clobbers rdx */
20069+ movq %rdi,%rdx
20070
20071 /* align dst */
20072 movl %edi,%r9d
20073@@ -120,7 +122,8 @@ ENTRY(__memset)
20074 jnz .Lloop_1
20075
20076 .Lende:
20077- movq %r10,%rax
20078+ movq %rdx,%rax
20079+ pax_force_retaddr
20080 ret
20081
20082 CFI_RESTORE_STATE
20083diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20084index c9f2d9b..e7fd2c0 100644
20085--- a/arch/x86/lib/mmx_32.c
20086+++ b/arch/x86/lib/mmx_32.c
20087@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20088 {
20089 void *p;
20090 int i;
20091+ unsigned long cr0;
20092
20093 if (unlikely(in_interrupt()))
20094 return __memcpy(to, from, len);
20095@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20096 kernel_fpu_begin();
20097
20098 __asm__ __volatile__ (
20099- "1: prefetch (%0)\n" /* This set is 28 bytes */
20100- " prefetch 64(%0)\n"
20101- " prefetch 128(%0)\n"
20102- " prefetch 192(%0)\n"
20103- " prefetch 256(%0)\n"
20104+ "1: prefetch (%1)\n" /* This set is 28 bytes */
20105+ " prefetch 64(%1)\n"
20106+ " prefetch 128(%1)\n"
20107+ " prefetch 192(%1)\n"
20108+ " prefetch 256(%1)\n"
20109 "2: \n"
20110 ".section .fixup, \"ax\"\n"
20111- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20112+ "3: \n"
20113+
20114+#ifdef CONFIG_PAX_KERNEXEC
20115+ " movl %%cr0, %0\n"
20116+ " movl %0, %%eax\n"
20117+ " andl $0xFFFEFFFF, %%eax\n"
20118+ " movl %%eax, %%cr0\n"
20119+#endif
20120+
20121+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20122+
20123+#ifdef CONFIG_PAX_KERNEXEC
20124+ " movl %0, %%cr0\n"
20125+#endif
20126+
20127 " jmp 2b\n"
20128 ".previous\n"
20129 _ASM_EXTABLE(1b, 3b)
20130- : : "r" (from));
20131+ : "=&r" (cr0) : "r" (from) : "ax");
20132
20133 for ( ; i > 5; i--) {
20134 __asm__ __volatile__ (
20135- "1: prefetch 320(%0)\n"
20136- "2: movq (%0), %%mm0\n"
20137- " movq 8(%0), %%mm1\n"
20138- " movq 16(%0), %%mm2\n"
20139- " movq 24(%0), %%mm3\n"
20140- " movq %%mm0, (%1)\n"
20141- " movq %%mm1, 8(%1)\n"
20142- " movq %%mm2, 16(%1)\n"
20143- " movq %%mm3, 24(%1)\n"
20144- " movq 32(%0), %%mm0\n"
20145- " movq 40(%0), %%mm1\n"
20146- " movq 48(%0), %%mm2\n"
20147- " movq 56(%0), %%mm3\n"
20148- " movq %%mm0, 32(%1)\n"
20149- " movq %%mm1, 40(%1)\n"
20150- " movq %%mm2, 48(%1)\n"
20151- " movq %%mm3, 56(%1)\n"
20152+ "1: prefetch 320(%1)\n"
20153+ "2: movq (%1), %%mm0\n"
20154+ " movq 8(%1), %%mm1\n"
20155+ " movq 16(%1), %%mm2\n"
20156+ " movq 24(%1), %%mm3\n"
20157+ " movq %%mm0, (%2)\n"
20158+ " movq %%mm1, 8(%2)\n"
20159+ " movq %%mm2, 16(%2)\n"
20160+ " movq %%mm3, 24(%2)\n"
20161+ " movq 32(%1), %%mm0\n"
20162+ " movq 40(%1), %%mm1\n"
20163+ " movq 48(%1), %%mm2\n"
20164+ " movq 56(%1), %%mm3\n"
20165+ " movq %%mm0, 32(%2)\n"
20166+ " movq %%mm1, 40(%2)\n"
20167+ " movq %%mm2, 48(%2)\n"
20168+ " movq %%mm3, 56(%2)\n"
20169 ".section .fixup, \"ax\"\n"
20170- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20171+ "3:\n"
20172+
20173+#ifdef CONFIG_PAX_KERNEXEC
20174+ " movl %%cr0, %0\n"
20175+ " movl %0, %%eax\n"
20176+ " andl $0xFFFEFFFF, %%eax\n"
20177+ " movl %%eax, %%cr0\n"
20178+#endif
20179+
20180+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20181+
20182+#ifdef CONFIG_PAX_KERNEXEC
20183+ " movl %0, %%cr0\n"
20184+#endif
20185+
20186 " jmp 2b\n"
20187 ".previous\n"
20188 _ASM_EXTABLE(1b, 3b)
20189- : : "r" (from), "r" (to) : "memory");
20190+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20191
20192 from += 64;
20193 to += 64;
20194@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20195 static void fast_copy_page(void *to, void *from)
20196 {
20197 int i;
20198+ unsigned long cr0;
20199
20200 kernel_fpu_begin();
20201
20202@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20203 * but that is for later. -AV
20204 */
20205 __asm__ __volatile__(
20206- "1: prefetch (%0)\n"
20207- " prefetch 64(%0)\n"
20208- " prefetch 128(%0)\n"
20209- " prefetch 192(%0)\n"
20210- " prefetch 256(%0)\n"
20211+ "1: prefetch (%1)\n"
20212+ " prefetch 64(%1)\n"
20213+ " prefetch 128(%1)\n"
20214+ " prefetch 192(%1)\n"
20215+ " prefetch 256(%1)\n"
20216 "2: \n"
20217 ".section .fixup, \"ax\"\n"
20218- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20219+ "3: \n"
20220+
20221+#ifdef CONFIG_PAX_KERNEXEC
20222+ " movl %%cr0, %0\n"
20223+ " movl %0, %%eax\n"
20224+ " andl $0xFFFEFFFF, %%eax\n"
20225+ " movl %%eax, %%cr0\n"
20226+#endif
20227+
20228+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20229+
20230+#ifdef CONFIG_PAX_KERNEXEC
20231+ " movl %0, %%cr0\n"
20232+#endif
20233+
20234 " jmp 2b\n"
20235 ".previous\n"
20236- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20237+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20238
20239 for (i = 0; i < (4096-320)/64; i++) {
20240 __asm__ __volatile__ (
20241- "1: prefetch 320(%0)\n"
20242- "2: movq (%0), %%mm0\n"
20243- " movntq %%mm0, (%1)\n"
20244- " movq 8(%0), %%mm1\n"
20245- " movntq %%mm1, 8(%1)\n"
20246- " movq 16(%0), %%mm2\n"
20247- " movntq %%mm2, 16(%1)\n"
20248- " movq 24(%0), %%mm3\n"
20249- " movntq %%mm3, 24(%1)\n"
20250- " movq 32(%0), %%mm4\n"
20251- " movntq %%mm4, 32(%1)\n"
20252- " movq 40(%0), %%mm5\n"
20253- " movntq %%mm5, 40(%1)\n"
20254- " movq 48(%0), %%mm6\n"
20255- " movntq %%mm6, 48(%1)\n"
20256- " movq 56(%0), %%mm7\n"
20257- " movntq %%mm7, 56(%1)\n"
20258+ "1: prefetch 320(%1)\n"
20259+ "2: movq (%1), %%mm0\n"
20260+ " movntq %%mm0, (%2)\n"
20261+ " movq 8(%1), %%mm1\n"
20262+ " movntq %%mm1, 8(%2)\n"
20263+ " movq 16(%1), %%mm2\n"
20264+ " movntq %%mm2, 16(%2)\n"
20265+ " movq 24(%1), %%mm3\n"
20266+ " movntq %%mm3, 24(%2)\n"
20267+ " movq 32(%1), %%mm4\n"
20268+ " movntq %%mm4, 32(%2)\n"
20269+ " movq 40(%1), %%mm5\n"
20270+ " movntq %%mm5, 40(%2)\n"
20271+ " movq 48(%1), %%mm6\n"
20272+ " movntq %%mm6, 48(%2)\n"
20273+ " movq 56(%1), %%mm7\n"
20274+ " movntq %%mm7, 56(%2)\n"
20275 ".section .fixup, \"ax\"\n"
20276- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20277+ "3:\n"
20278+
20279+#ifdef CONFIG_PAX_KERNEXEC
20280+ " movl %%cr0, %0\n"
20281+ " movl %0, %%eax\n"
20282+ " andl $0xFFFEFFFF, %%eax\n"
20283+ " movl %%eax, %%cr0\n"
20284+#endif
20285+
20286+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20287+
20288+#ifdef CONFIG_PAX_KERNEXEC
20289+ " movl %0, %%cr0\n"
20290+#endif
20291+
20292 " jmp 2b\n"
20293 ".previous\n"
20294- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20295+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20296
20297 from += 64;
20298 to += 64;
20299@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20300 static void fast_copy_page(void *to, void *from)
20301 {
20302 int i;
20303+ unsigned long cr0;
20304
20305 kernel_fpu_begin();
20306
20307 __asm__ __volatile__ (
20308- "1: prefetch (%0)\n"
20309- " prefetch 64(%0)\n"
20310- " prefetch 128(%0)\n"
20311- " prefetch 192(%0)\n"
20312- " prefetch 256(%0)\n"
20313+ "1: prefetch (%1)\n"
20314+ " prefetch 64(%1)\n"
20315+ " prefetch 128(%1)\n"
20316+ " prefetch 192(%1)\n"
20317+ " prefetch 256(%1)\n"
20318 "2: \n"
20319 ".section .fixup, \"ax\"\n"
20320- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20321+ "3: \n"
20322+
20323+#ifdef CONFIG_PAX_KERNEXEC
20324+ " movl %%cr0, %0\n"
20325+ " movl %0, %%eax\n"
20326+ " andl $0xFFFEFFFF, %%eax\n"
20327+ " movl %%eax, %%cr0\n"
20328+#endif
20329+
20330+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20331+
20332+#ifdef CONFIG_PAX_KERNEXEC
20333+ " movl %0, %%cr0\n"
20334+#endif
20335+
20336 " jmp 2b\n"
20337 ".previous\n"
20338- _ASM_EXTABLE(1b, 3b) : : "r" (from));
20339+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20340
20341 for (i = 0; i < 4096/64; i++) {
20342 __asm__ __volatile__ (
20343- "1: prefetch 320(%0)\n"
20344- "2: movq (%0), %%mm0\n"
20345- " movq 8(%0), %%mm1\n"
20346- " movq 16(%0), %%mm2\n"
20347- " movq 24(%0), %%mm3\n"
20348- " movq %%mm0, (%1)\n"
20349- " movq %%mm1, 8(%1)\n"
20350- " movq %%mm2, 16(%1)\n"
20351- " movq %%mm3, 24(%1)\n"
20352- " movq 32(%0), %%mm0\n"
20353- " movq 40(%0), %%mm1\n"
20354- " movq 48(%0), %%mm2\n"
20355- " movq 56(%0), %%mm3\n"
20356- " movq %%mm0, 32(%1)\n"
20357- " movq %%mm1, 40(%1)\n"
20358- " movq %%mm2, 48(%1)\n"
20359- " movq %%mm3, 56(%1)\n"
20360+ "1: prefetch 320(%1)\n"
20361+ "2: movq (%1), %%mm0\n"
20362+ " movq 8(%1), %%mm1\n"
20363+ " movq 16(%1), %%mm2\n"
20364+ " movq 24(%1), %%mm3\n"
20365+ " movq %%mm0, (%2)\n"
20366+ " movq %%mm1, 8(%2)\n"
20367+ " movq %%mm2, 16(%2)\n"
20368+ " movq %%mm3, 24(%2)\n"
20369+ " movq 32(%1), %%mm0\n"
20370+ " movq 40(%1), %%mm1\n"
20371+ " movq 48(%1), %%mm2\n"
20372+ " movq 56(%1), %%mm3\n"
20373+ " movq %%mm0, 32(%2)\n"
20374+ " movq %%mm1, 40(%2)\n"
20375+ " movq %%mm2, 48(%2)\n"
20376+ " movq %%mm3, 56(%2)\n"
20377 ".section .fixup, \"ax\"\n"
20378- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20379+ "3:\n"
20380+
20381+#ifdef CONFIG_PAX_KERNEXEC
20382+ " movl %%cr0, %0\n"
20383+ " movl %0, %%eax\n"
20384+ " andl $0xFFFEFFFF, %%eax\n"
20385+ " movl %%eax, %%cr0\n"
20386+#endif
20387+
20388+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20389+
20390+#ifdef CONFIG_PAX_KERNEXEC
20391+ " movl %0, %%cr0\n"
20392+#endif
20393+
20394 " jmp 2b\n"
20395 ".previous\n"
20396 _ASM_EXTABLE(1b, 3b)
20397- : : "r" (from), "r" (to) : "memory");
20398+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20399
20400 from += 64;
20401 to += 64;
20402diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20403index 69fa106..adda88b 100644
20404--- a/arch/x86/lib/msr-reg.S
20405+++ b/arch/x86/lib/msr-reg.S
20406@@ -3,6 +3,7 @@
20407 #include <asm/dwarf2.h>
20408 #include <asm/asm.h>
20409 #include <asm/msr.h>
20410+#include <asm/alternative-asm.h>
20411
20412 #ifdef CONFIG_X86_64
20413 /*
20414@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20415 CFI_STARTPROC
20416 pushq_cfi %rbx
20417 pushq_cfi %rbp
20418- movq %rdi, %r10 /* Save pointer */
20419+ movq %rdi, %r9 /* Save pointer */
20420 xorl %r11d, %r11d /* Return value */
20421 movl (%rdi), %eax
20422 movl 4(%rdi), %ecx
20423@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20424 movl 28(%rdi), %edi
20425 CFI_REMEMBER_STATE
20426 1: \op
20427-2: movl %eax, (%r10)
20428+2: movl %eax, (%r9)
20429 movl %r11d, %eax /* Return value */
20430- movl %ecx, 4(%r10)
20431- movl %edx, 8(%r10)
20432- movl %ebx, 12(%r10)
20433- movl %ebp, 20(%r10)
20434- movl %esi, 24(%r10)
20435- movl %edi, 28(%r10)
20436+ movl %ecx, 4(%r9)
20437+ movl %edx, 8(%r9)
20438+ movl %ebx, 12(%r9)
20439+ movl %ebp, 20(%r9)
20440+ movl %esi, 24(%r9)
20441+ movl %edi, 28(%r9)
20442 popq_cfi %rbp
20443 popq_cfi %rbx
20444+ pax_force_retaddr
20445 ret
20446 3:
20447 CFI_RESTORE_STATE
20448diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20449index 36b0d15..d381858 100644
20450--- a/arch/x86/lib/putuser.S
20451+++ b/arch/x86/lib/putuser.S
20452@@ -15,7 +15,9 @@
20453 #include <asm/thread_info.h>
20454 #include <asm/errno.h>
20455 #include <asm/asm.h>
20456-
20457+#include <asm/segment.h>
20458+#include <asm/pgtable.h>
20459+#include <asm/alternative-asm.h>
20460
20461 /*
20462 * __put_user_X
20463@@ -29,52 +31,119 @@
20464 * as they get called from within inline assembly.
20465 */
20466
20467-#define ENTER CFI_STARTPROC ; \
20468- GET_THREAD_INFO(%_ASM_BX)
20469-#define EXIT ret ; \
20470+#define ENTER CFI_STARTPROC
20471+#define EXIT pax_force_retaddr; ret ; \
20472 CFI_ENDPROC
20473
20474+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20475+#define _DEST %_ASM_CX,%_ASM_BX
20476+#else
20477+#define _DEST %_ASM_CX
20478+#endif
20479+
20480+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20481+#define __copyuser_seg gs;
20482+#else
20483+#define __copyuser_seg
20484+#endif
20485+
20486 .text
20487 ENTRY(__put_user_1)
20488 ENTER
20489+
20490+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20491+ GET_THREAD_INFO(%_ASM_BX)
20492 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20493 jae bad_put_user
20494-1: movb %al,(%_ASM_CX)
20495+
20496+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20497+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20498+ cmp %_ASM_BX,%_ASM_CX
20499+ jb 1234f
20500+ xor %ebx,%ebx
20501+1234:
20502+#endif
20503+
20504+#endif
20505+
20506+1: __copyuser_seg movb %al,(_DEST)
20507 xor %eax,%eax
20508 EXIT
20509 ENDPROC(__put_user_1)
20510
20511 ENTRY(__put_user_2)
20512 ENTER
20513+
20514+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20515+ GET_THREAD_INFO(%_ASM_BX)
20516 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20517 sub $1,%_ASM_BX
20518 cmp %_ASM_BX,%_ASM_CX
20519 jae bad_put_user
20520-2: movw %ax,(%_ASM_CX)
20521+
20522+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20523+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20524+ cmp %_ASM_BX,%_ASM_CX
20525+ jb 1234f
20526+ xor %ebx,%ebx
20527+1234:
20528+#endif
20529+
20530+#endif
20531+
20532+2: __copyuser_seg movw %ax,(_DEST)
20533 xor %eax,%eax
20534 EXIT
20535 ENDPROC(__put_user_2)
20536
20537 ENTRY(__put_user_4)
20538 ENTER
20539+
20540+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20541+ GET_THREAD_INFO(%_ASM_BX)
20542 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20543 sub $3,%_ASM_BX
20544 cmp %_ASM_BX,%_ASM_CX
20545 jae bad_put_user
20546-3: movl %eax,(%_ASM_CX)
20547+
20548+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20549+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20550+ cmp %_ASM_BX,%_ASM_CX
20551+ jb 1234f
20552+ xor %ebx,%ebx
20553+1234:
20554+#endif
20555+
20556+#endif
20557+
20558+3: __copyuser_seg movl %eax,(_DEST)
20559 xor %eax,%eax
20560 EXIT
20561 ENDPROC(__put_user_4)
20562
20563 ENTRY(__put_user_8)
20564 ENTER
20565+
20566+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20567+ GET_THREAD_INFO(%_ASM_BX)
20568 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20569 sub $7,%_ASM_BX
20570 cmp %_ASM_BX,%_ASM_CX
20571 jae bad_put_user
20572-4: mov %_ASM_AX,(%_ASM_CX)
20573+
20574+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20575+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20576+ cmp %_ASM_BX,%_ASM_CX
20577+ jb 1234f
20578+ xor %ebx,%ebx
20579+1234:
20580+#endif
20581+
20582+#endif
20583+
20584+4: __copyuser_seg mov %_ASM_AX,(_DEST)
20585 #ifdef CONFIG_X86_32
20586-5: movl %edx,4(%_ASM_CX)
20587+5: __copyuser_seg movl %edx,4(_DEST)
20588 #endif
20589 xor %eax,%eax
20590 EXIT
20591diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20592index 1cad221..de671ee 100644
20593--- a/arch/x86/lib/rwlock.S
20594+++ b/arch/x86/lib/rwlock.S
20595@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20596 FRAME
20597 0: LOCK_PREFIX
20598 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20599+
20600+#ifdef CONFIG_PAX_REFCOUNT
20601+ jno 1234f
20602+ LOCK_PREFIX
20603+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20604+ int $4
20605+1234:
20606+ _ASM_EXTABLE(1234b, 1234b)
20607+#endif
20608+
20609 1: rep; nop
20610 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20611 jne 1b
20612 LOCK_PREFIX
20613 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20614+
20615+#ifdef CONFIG_PAX_REFCOUNT
20616+ jno 1234f
20617+ LOCK_PREFIX
20618+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20619+ int $4
20620+1234:
20621+ _ASM_EXTABLE(1234b, 1234b)
20622+#endif
20623+
20624 jnz 0b
20625 ENDFRAME
20626+ pax_force_retaddr
20627 ret
20628 CFI_ENDPROC
20629 END(__write_lock_failed)
20630@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20631 FRAME
20632 0: LOCK_PREFIX
20633 READ_LOCK_SIZE(inc) (%__lock_ptr)
20634+
20635+#ifdef CONFIG_PAX_REFCOUNT
20636+ jno 1234f
20637+ LOCK_PREFIX
20638+ READ_LOCK_SIZE(dec) (%__lock_ptr)
20639+ int $4
20640+1234:
20641+ _ASM_EXTABLE(1234b, 1234b)
20642+#endif
20643+
20644 1: rep; nop
20645 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20646 js 1b
20647 LOCK_PREFIX
20648 READ_LOCK_SIZE(dec) (%__lock_ptr)
20649+
20650+#ifdef CONFIG_PAX_REFCOUNT
20651+ jno 1234f
20652+ LOCK_PREFIX
20653+ READ_LOCK_SIZE(inc) (%__lock_ptr)
20654+ int $4
20655+1234:
20656+ _ASM_EXTABLE(1234b, 1234b)
20657+#endif
20658+
20659 js 0b
20660 ENDFRAME
20661+ pax_force_retaddr
20662 ret
20663 CFI_ENDPROC
20664 END(__read_lock_failed)
20665diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20666index 5dff5f0..cadebf4 100644
20667--- a/arch/x86/lib/rwsem.S
20668+++ b/arch/x86/lib/rwsem.S
20669@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20670 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20671 CFI_RESTORE __ASM_REG(dx)
20672 restore_common_regs
20673+ pax_force_retaddr
20674 ret
20675 CFI_ENDPROC
20676 ENDPROC(call_rwsem_down_read_failed)
20677@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20678 movq %rax,%rdi
20679 call rwsem_down_write_failed
20680 restore_common_regs
20681+ pax_force_retaddr
20682 ret
20683 CFI_ENDPROC
20684 ENDPROC(call_rwsem_down_write_failed)
20685@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20686 movq %rax,%rdi
20687 call rwsem_wake
20688 restore_common_regs
20689-1: ret
20690+1: pax_force_retaddr
20691+ ret
20692 CFI_ENDPROC
20693 ENDPROC(call_rwsem_wake)
20694
20695@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20696 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20697 CFI_RESTORE __ASM_REG(dx)
20698 restore_common_regs
20699+ pax_force_retaddr
20700 ret
20701 CFI_ENDPROC
20702 ENDPROC(call_rwsem_downgrade_wake)
20703diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20704index a63efd6..ccecad8 100644
20705--- a/arch/x86/lib/thunk_64.S
20706+++ b/arch/x86/lib/thunk_64.S
20707@@ -8,6 +8,7 @@
20708 #include <linux/linkage.h>
20709 #include <asm/dwarf2.h>
20710 #include <asm/calling.h>
20711+#include <asm/alternative-asm.h>
20712
20713 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20714 .macro THUNK name, func, put_ret_addr_in_rdi=0
20715@@ -41,5 +42,6 @@
20716 SAVE_ARGS
20717 restore:
20718 RESTORE_ARGS
20719+ pax_force_retaddr
20720 ret
20721 CFI_ENDPROC
20722diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20723index e218d5d..35679b4 100644
20724--- a/arch/x86/lib/usercopy_32.c
20725+++ b/arch/x86/lib/usercopy_32.c
20726@@ -43,7 +43,7 @@ do { \
20727 __asm__ __volatile__( \
20728 " testl %1,%1\n" \
20729 " jz 2f\n" \
20730- "0: lodsb\n" \
20731+ "0: "__copyuser_seg"lodsb\n" \
20732 " stosb\n" \
20733 " testb %%al,%%al\n" \
20734 " jz 1f\n" \
20735@@ -128,10 +128,12 @@ do { \
20736 int __d0; \
20737 might_fault(); \
20738 __asm__ __volatile__( \
20739+ __COPYUSER_SET_ES \
20740 "0: rep; stosl\n" \
20741 " movl %2,%0\n" \
20742 "1: rep; stosb\n" \
20743 "2:\n" \
20744+ __COPYUSER_RESTORE_ES \
20745 ".section .fixup,\"ax\"\n" \
20746 "3: lea 0(%2,%0,4),%0\n" \
20747 " jmp 2b\n" \
20748@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20749 might_fault();
20750
20751 __asm__ __volatile__(
20752+ __COPYUSER_SET_ES
20753 " testl %0, %0\n"
20754 " jz 3f\n"
20755 " andl %0,%%ecx\n"
20756@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20757 " subl %%ecx,%0\n"
20758 " addl %0,%%eax\n"
20759 "1:\n"
20760+ __COPYUSER_RESTORE_ES
20761 ".section .fixup,\"ax\"\n"
20762 "2: xorl %%eax,%%eax\n"
20763 " jmp 1b\n"
20764@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20765
20766 #ifdef CONFIG_X86_INTEL_USERCOPY
20767 static unsigned long
20768-__copy_user_intel(void __user *to, const void *from, unsigned long size)
20769+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20770 {
20771 int d0, d1;
20772 __asm__ __volatile__(
20773@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20774 " .align 2,0x90\n"
20775 "3: movl 0(%4), %%eax\n"
20776 "4: movl 4(%4), %%edx\n"
20777- "5: movl %%eax, 0(%3)\n"
20778- "6: movl %%edx, 4(%3)\n"
20779+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20780+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20781 "7: movl 8(%4), %%eax\n"
20782 "8: movl 12(%4),%%edx\n"
20783- "9: movl %%eax, 8(%3)\n"
20784- "10: movl %%edx, 12(%3)\n"
20785+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20786+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20787 "11: movl 16(%4), %%eax\n"
20788 "12: movl 20(%4), %%edx\n"
20789- "13: movl %%eax, 16(%3)\n"
20790- "14: movl %%edx, 20(%3)\n"
20791+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20792+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20793 "15: movl 24(%4), %%eax\n"
20794 "16: movl 28(%4), %%edx\n"
20795- "17: movl %%eax, 24(%3)\n"
20796- "18: movl %%edx, 28(%3)\n"
20797+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20798+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20799 "19: movl 32(%4), %%eax\n"
20800 "20: movl 36(%4), %%edx\n"
20801- "21: movl %%eax, 32(%3)\n"
20802- "22: movl %%edx, 36(%3)\n"
20803+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20804+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20805 "23: movl 40(%4), %%eax\n"
20806 "24: movl 44(%4), %%edx\n"
20807- "25: movl %%eax, 40(%3)\n"
20808- "26: movl %%edx, 44(%3)\n"
20809+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20810+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20811 "27: movl 48(%4), %%eax\n"
20812 "28: movl 52(%4), %%edx\n"
20813- "29: movl %%eax, 48(%3)\n"
20814- "30: movl %%edx, 52(%3)\n"
20815+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20816+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20817 "31: movl 56(%4), %%eax\n"
20818 "32: movl 60(%4), %%edx\n"
20819- "33: movl %%eax, 56(%3)\n"
20820- "34: movl %%edx, 60(%3)\n"
20821+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20822+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20823 " addl $-64, %0\n"
20824 " addl $64, %4\n"
20825 " addl $64, %3\n"
20826@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20827 " shrl $2, %0\n"
20828 " andl $3, %%eax\n"
20829 " cld\n"
20830+ __COPYUSER_SET_ES
20831 "99: rep; movsl\n"
20832 "36: movl %%eax, %0\n"
20833 "37: rep; movsb\n"
20834 "100:\n"
20835+ __COPYUSER_RESTORE_ES
20836+ ".section .fixup,\"ax\"\n"
20837+ "101: lea 0(%%eax,%0,4),%0\n"
20838+ " jmp 100b\n"
20839+ ".previous\n"
20840+ ".section __ex_table,\"a\"\n"
20841+ " .align 4\n"
20842+ " .long 1b,100b\n"
20843+ " .long 2b,100b\n"
20844+ " .long 3b,100b\n"
20845+ " .long 4b,100b\n"
20846+ " .long 5b,100b\n"
20847+ " .long 6b,100b\n"
20848+ " .long 7b,100b\n"
20849+ " .long 8b,100b\n"
20850+ " .long 9b,100b\n"
20851+ " .long 10b,100b\n"
20852+ " .long 11b,100b\n"
20853+ " .long 12b,100b\n"
20854+ " .long 13b,100b\n"
20855+ " .long 14b,100b\n"
20856+ " .long 15b,100b\n"
20857+ " .long 16b,100b\n"
20858+ " .long 17b,100b\n"
20859+ " .long 18b,100b\n"
20860+ " .long 19b,100b\n"
20861+ " .long 20b,100b\n"
20862+ " .long 21b,100b\n"
20863+ " .long 22b,100b\n"
20864+ " .long 23b,100b\n"
20865+ " .long 24b,100b\n"
20866+ " .long 25b,100b\n"
20867+ " .long 26b,100b\n"
20868+ " .long 27b,100b\n"
20869+ " .long 28b,100b\n"
20870+ " .long 29b,100b\n"
20871+ " .long 30b,100b\n"
20872+ " .long 31b,100b\n"
20873+ " .long 32b,100b\n"
20874+ " .long 33b,100b\n"
20875+ " .long 34b,100b\n"
20876+ " .long 35b,100b\n"
20877+ " .long 36b,100b\n"
20878+ " .long 37b,100b\n"
20879+ " .long 99b,101b\n"
20880+ ".previous"
20881+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20882+ : "1"(to), "2"(from), "0"(size)
20883+ : "eax", "edx", "memory");
20884+ return size;
20885+}
20886+
20887+static unsigned long
20888+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20889+{
20890+ int d0, d1;
20891+ __asm__ __volatile__(
20892+ " .align 2,0x90\n"
20893+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20894+ " cmpl $67, %0\n"
20895+ " jbe 3f\n"
20896+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20897+ " .align 2,0x90\n"
20898+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20899+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20900+ "5: movl %%eax, 0(%3)\n"
20901+ "6: movl %%edx, 4(%3)\n"
20902+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20903+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20904+ "9: movl %%eax, 8(%3)\n"
20905+ "10: movl %%edx, 12(%3)\n"
20906+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20907+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20908+ "13: movl %%eax, 16(%3)\n"
20909+ "14: movl %%edx, 20(%3)\n"
20910+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20911+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20912+ "17: movl %%eax, 24(%3)\n"
20913+ "18: movl %%edx, 28(%3)\n"
20914+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20915+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20916+ "21: movl %%eax, 32(%3)\n"
20917+ "22: movl %%edx, 36(%3)\n"
20918+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20919+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20920+ "25: movl %%eax, 40(%3)\n"
20921+ "26: movl %%edx, 44(%3)\n"
20922+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20923+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20924+ "29: movl %%eax, 48(%3)\n"
20925+ "30: movl %%edx, 52(%3)\n"
20926+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20927+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20928+ "33: movl %%eax, 56(%3)\n"
20929+ "34: movl %%edx, 60(%3)\n"
20930+ " addl $-64, %0\n"
20931+ " addl $64, %4\n"
20932+ " addl $64, %3\n"
20933+ " cmpl $63, %0\n"
20934+ " ja 1b\n"
20935+ "35: movl %0, %%eax\n"
20936+ " shrl $2, %0\n"
20937+ " andl $3, %%eax\n"
20938+ " cld\n"
20939+ "99: rep; "__copyuser_seg" movsl\n"
20940+ "36: movl %%eax, %0\n"
20941+ "37: rep; "__copyuser_seg" movsb\n"
20942+ "100:\n"
20943 ".section .fixup,\"ax\"\n"
20944 "101: lea 0(%%eax,%0,4),%0\n"
20945 " jmp 100b\n"
20946@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20947 int d0, d1;
20948 __asm__ __volatile__(
20949 " .align 2,0x90\n"
20950- "0: movl 32(%4), %%eax\n"
20951+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20952 " cmpl $67, %0\n"
20953 " jbe 2f\n"
20954- "1: movl 64(%4), %%eax\n"
20955+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20956 " .align 2,0x90\n"
20957- "2: movl 0(%4), %%eax\n"
20958- "21: movl 4(%4), %%edx\n"
20959+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20960+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20961 " movl %%eax, 0(%3)\n"
20962 " movl %%edx, 4(%3)\n"
20963- "3: movl 8(%4), %%eax\n"
20964- "31: movl 12(%4),%%edx\n"
20965+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20966+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20967 " movl %%eax, 8(%3)\n"
20968 " movl %%edx, 12(%3)\n"
20969- "4: movl 16(%4), %%eax\n"
20970- "41: movl 20(%4), %%edx\n"
20971+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20972+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20973 " movl %%eax, 16(%3)\n"
20974 " movl %%edx, 20(%3)\n"
20975- "10: movl 24(%4), %%eax\n"
20976- "51: movl 28(%4), %%edx\n"
20977+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20978+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20979 " movl %%eax, 24(%3)\n"
20980 " movl %%edx, 28(%3)\n"
20981- "11: movl 32(%4), %%eax\n"
20982- "61: movl 36(%4), %%edx\n"
20983+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20984+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20985 " movl %%eax, 32(%3)\n"
20986 " movl %%edx, 36(%3)\n"
20987- "12: movl 40(%4), %%eax\n"
20988- "71: movl 44(%4), %%edx\n"
20989+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20990+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20991 " movl %%eax, 40(%3)\n"
20992 " movl %%edx, 44(%3)\n"
20993- "13: movl 48(%4), %%eax\n"
20994- "81: movl 52(%4), %%edx\n"
20995+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20996+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20997 " movl %%eax, 48(%3)\n"
20998 " movl %%edx, 52(%3)\n"
20999- "14: movl 56(%4), %%eax\n"
21000- "91: movl 60(%4), %%edx\n"
21001+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21002+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21003 " movl %%eax, 56(%3)\n"
21004 " movl %%edx, 60(%3)\n"
21005 " addl $-64, %0\n"
21006@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
21007 " shrl $2, %0\n"
21008 " andl $3, %%eax\n"
21009 " cld\n"
21010- "6: rep; movsl\n"
21011+ "6: rep; "__copyuser_seg" movsl\n"
21012 " movl %%eax,%0\n"
21013- "7: rep; movsb\n"
21014+ "7: rep; "__copyuser_seg" movsb\n"
21015 "8:\n"
21016 ".section .fixup,\"ax\"\n"
21017 "9: lea 0(%%eax,%0,4),%0\n"
21018@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21019
21020 __asm__ __volatile__(
21021 " .align 2,0x90\n"
21022- "0: movl 32(%4), %%eax\n"
21023+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21024 " cmpl $67, %0\n"
21025 " jbe 2f\n"
21026- "1: movl 64(%4), %%eax\n"
21027+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21028 " .align 2,0x90\n"
21029- "2: movl 0(%4), %%eax\n"
21030- "21: movl 4(%4), %%edx\n"
21031+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21032+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21033 " movnti %%eax, 0(%3)\n"
21034 " movnti %%edx, 4(%3)\n"
21035- "3: movl 8(%4), %%eax\n"
21036- "31: movl 12(%4),%%edx\n"
21037+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21038+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21039 " movnti %%eax, 8(%3)\n"
21040 " movnti %%edx, 12(%3)\n"
21041- "4: movl 16(%4), %%eax\n"
21042- "41: movl 20(%4), %%edx\n"
21043+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21044+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21045 " movnti %%eax, 16(%3)\n"
21046 " movnti %%edx, 20(%3)\n"
21047- "10: movl 24(%4), %%eax\n"
21048- "51: movl 28(%4), %%edx\n"
21049+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21050+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21051 " movnti %%eax, 24(%3)\n"
21052 " movnti %%edx, 28(%3)\n"
21053- "11: movl 32(%4), %%eax\n"
21054- "61: movl 36(%4), %%edx\n"
21055+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21056+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21057 " movnti %%eax, 32(%3)\n"
21058 " movnti %%edx, 36(%3)\n"
21059- "12: movl 40(%4), %%eax\n"
21060- "71: movl 44(%4), %%edx\n"
21061+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21062+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21063 " movnti %%eax, 40(%3)\n"
21064 " movnti %%edx, 44(%3)\n"
21065- "13: movl 48(%4), %%eax\n"
21066- "81: movl 52(%4), %%edx\n"
21067+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21068+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21069 " movnti %%eax, 48(%3)\n"
21070 " movnti %%edx, 52(%3)\n"
21071- "14: movl 56(%4), %%eax\n"
21072- "91: movl 60(%4), %%edx\n"
21073+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21074+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21075 " movnti %%eax, 56(%3)\n"
21076 " movnti %%edx, 60(%3)\n"
21077 " addl $-64, %0\n"
21078@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21079 " shrl $2, %0\n"
21080 " andl $3, %%eax\n"
21081 " cld\n"
21082- "6: rep; movsl\n"
21083+ "6: rep; "__copyuser_seg" movsl\n"
21084 " movl %%eax,%0\n"
21085- "7: rep; movsb\n"
21086+ "7: rep; "__copyuser_seg" movsb\n"
21087 "8:\n"
21088 ".section .fixup,\"ax\"\n"
21089 "9: lea 0(%%eax,%0,4),%0\n"
21090@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21091
21092 __asm__ __volatile__(
21093 " .align 2,0x90\n"
21094- "0: movl 32(%4), %%eax\n"
21095+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21096 " cmpl $67, %0\n"
21097 " jbe 2f\n"
21098- "1: movl 64(%4), %%eax\n"
21099+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21100 " .align 2,0x90\n"
21101- "2: movl 0(%4), %%eax\n"
21102- "21: movl 4(%4), %%edx\n"
21103+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21104+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21105 " movnti %%eax, 0(%3)\n"
21106 " movnti %%edx, 4(%3)\n"
21107- "3: movl 8(%4), %%eax\n"
21108- "31: movl 12(%4),%%edx\n"
21109+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21110+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21111 " movnti %%eax, 8(%3)\n"
21112 " movnti %%edx, 12(%3)\n"
21113- "4: movl 16(%4), %%eax\n"
21114- "41: movl 20(%4), %%edx\n"
21115+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21116+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21117 " movnti %%eax, 16(%3)\n"
21118 " movnti %%edx, 20(%3)\n"
21119- "10: movl 24(%4), %%eax\n"
21120- "51: movl 28(%4), %%edx\n"
21121+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21122+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21123 " movnti %%eax, 24(%3)\n"
21124 " movnti %%edx, 28(%3)\n"
21125- "11: movl 32(%4), %%eax\n"
21126- "61: movl 36(%4), %%edx\n"
21127+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21128+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21129 " movnti %%eax, 32(%3)\n"
21130 " movnti %%edx, 36(%3)\n"
21131- "12: movl 40(%4), %%eax\n"
21132- "71: movl 44(%4), %%edx\n"
21133+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21134+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21135 " movnti %%eax, 40(%3)\n"
21136 " movnti %%edx, 44(%3)\n"
21137- "13: movl 48(%4), %%eax\n"
21138- "81: movl 52(%4), %%edx\n"
21139+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21140+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21141 " movnti %%eax, 48(%3)\n"
21142 " movnti %%edx, 52(%3)\n"
21143- "14: movl 56(%4), %%eax\n"
21144- "91: movl 60(%4), %%edx\n"
21145+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21146+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21147 " movnti %%eax, 56(%3)\n"
21148 " movnti %%edx, 60(%3)\n"
21149 " addl $-64, %0\n"
21150@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21151 " shrl $2, %0\n"
21152 " andl $3, %%eax\n"
21153 " cld\n"
21154- "6: rep; movsl\n"
21155+ "6: rep; "__copyuser_seg" movsl\n"
21156 " movl %%eax,%0\n"
21157- "7: rep; movsb\n"
21158+ "7: rep; "__copyuser_seg" movsb\n"
21159 "8:\n"
21160 ".section .fixup,\"ax\"\n"
21161 "9: lea 0(%%eax,%0,4),%0\n"
21162@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21163 */
21164 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21165 unsigned long size);
21166-unsigned long __copy_user_intel(void __user *to, const void *from,
21167+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21168+ unsigned long size);
21169+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21170 unsigned long size);
21171 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21172 const void __user *from, unsigned long size);
21173 #endif /* CONFIG_X86_INTEL_USERCOPY */
21174
21175 /* Generic arbitrary sized copy. */
21176-#define __copy_user(to, from, size) \
21177+#define __copy_user(to, from, size, prefix, set, restore) \
21178 do { \
21179 int __d0, __d1, __d2; \
21180 __asm__ __volatile__( \
21181+ set \
21182 " cmp $7,%0\n" \
21183 " jbe 1f\n" \
21184 " movl %1,%0\n" \
21185 " negl %0\n" \
21186 " andl $7,%0\n" \
21187 " subl %0,%3\n" \
21188- "4: rep; movsb\n" \
21189+ "4: rep; "prefix"movsb\n" \
21190 " movl %3,%0\n" \
21191 " shrl $2,%0\n" \
21192 " andl $3,%3\n" \
21193 " .align 2,0x90\n" \
21194- "0: rep; movsl\n" \
21195+ "0: rep; "prefix"movsl\n" \
21196 " movl %3,%0\n" \
21197- "1: rep; movsb\n" \
21198+ "1: rep; "prefix"movsb\n" \
21199 "2:\n" \
21200+ restore \
21201 ".section .fixup,\"ax\"\n" \
21202 "5: addl %3,%0\n" \
21203 " jmp 2b\n" \
21204@@ -682,14 +799,14 @@ do { \
21205 " negl %0\n" \
21206 " andl $7,%0\n" \
21207 " subl %0,%3\n" \
21208- "4: rep; movsb\n" \
21209+ "4: rep; "__copyuser_seg"movsb\n" \
21210 " movl %3,%0\n" \
21211 " shrl $2,%0\n" \
21212 " andl $3,%3\n" \
21213 " .align 2,0x90\n" \
21214- "0: rep; movsl\n" \
21215+ "0: rep; "__copyuser_seg"movsl\n" \
21216 " movl %3,%0\n" \
21217- "1: rep; movsb\n" \
21218+ "1: rep; "__copyuser_seg"movsb\n" \
21219 "2:\n" \
21220 ".section .fixup,\"ax\"\n" \
21221 "5: addl %3,%0\n" \
21222@@ -775,9 +892,9 @@ survive:
21223 }
21224 #endif
21225 if (movsl_is_ok(to, from, n))
21226- __copy_user(to, from, n);
21227+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21228 else
21229- n = __copy_user_intel(to, from, n);
21230+ n = __generic_copy_to_user_intel(to, from, n);
21231 return n;
21232 }
21233 EXPORT_SYMBOL(__copy_to_user_ll);
21234@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21235 unsigned long n)
21236 {
21237 if (movsl_is_ok(to, from, n))
21238- __copy_user(to, from, n);
21239+ __copy_user(to, from, n, __copyuser_seg, "", "");
21240 else
21241- n = __copy_user_intel((void __user *)to,
21242- (const void *)from, n);
21243+ n = __generic_copy_from_user_intel(to, from, n);
21244 return n;
21245 }
21246 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21247@@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21248 if (n > 64 && cpu_has_xmm2)
21249 n = __copy_user_intel_nocache(to, from, n);
21250 else
21251- __copy_user(to, from, n);
21252+ __copy_user(to, from, n, __copyuser_seg, "", "");
21253 #else
21254- __copy_user(to, from, n);
21255+ __copy_user(to, from, n, __copyuser_seg, "", "");
21256 #endif
21257 return n;
21258 }
21259 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21260
21261-/**
21262- * copy_to_user: - Copy a block of data into user space.
21263- * @to: Destination address, in user space.
21264- * @from: Source address, in kernel space.
21265- * @n: Number of bytes to copy.
21266- *
21267- * Context: User context only. This function may sleep.
21268- *
21269- * Copy data from kernel space to user space.
21270- *
21271- * Returns number of bytes that could not be copied.
21272- * On success, this will be zero.
21273- */
21274-unsigned long
21275-copy_to_user(void __user *to, const void *from, unsigned long n)
21276-{
21277- if (access_ok(VERIFY_WRITE, to, n))
21278- n = __copy_to_user(to, from, n);
21279- return n;
21280-}
21281-EXPORT_SYMBOL(copy_to_user);
21282-
21283-/**
21284- * copy_from_user: - Copy a block of data from user space.
21285- * @to: Destination address, in kernel space.
21286- * @from: Source address, in user space.
21287- * @n: Number of bytes to copy.
21288- *
21289- * Context: User context only. This function may sleep.
21290- *
21291- * Copy data from user space to kernel space.
21292- *
21293- * Returns number of bytes that could not be copied.
21294- * On success, this will be zero.
21295- *
21296- * If some data could not be copied, this function will pad the copied
21297- * data to the requested size using zero bytes.
21298- */
21299-unsigned long
21300-_copy_from_user(void *to, const void __user *from, unsigned long n)
21301-{
21302- if (access_ok(VERIFY_READ, from, n))
21303- n = __copy_from_user(to, from, n);
21304- else
21305- memset(to, 0, n);
21306- return n;
21307-}
21308-EXPORT_SYMBOL(_copy_from_user);
21309-
21310 void copy_from_user_overflow(void)
21311 {
21312 WARN(1, "Buffer overflow detected!\n");
21313 }
21314 EXPORT_SYMBOL(copy_from_user_overflow);
21315+
21316+void copy_to_user_overflow(void)
21317+{
21318+ WARN(1, "Buffer overflow detected!\n");
21319+}
21320+EXPORT_SYMBOL(copy_to_user_overflow);
21321+
21322+#ifdef CONFIG_PAX_MEMORY_UDEREF
21323+void __set_fs(mm_segment_t x)
21324+{
21325+ switch (x.seg) {
21326+ case 0:
21327+ loadsegment(gs, 0);
21328+ break;
21329+ case TASK_SIZE_MAX:
21330+ loadsegment(gs, __USER_DS);
21331+ break;
21332+ case -1UL:
21333+ loadsegment(gs, __KERNEL_DS);
21334+ break;
21335+ default:
21336+ BUG();
21337+ }
21338+ return;
21339+}
21340+EXPORT_SYMBOL(__set_fs);
21341+
21342+void set_fs(mm_segment_t x)
21343+{
21344+ current_thread_info()->addr_limit = x;
21345+ __set_fs(x);
21346+}
21347+EXPORT_SYMBOL(set_fs);
21348+#endif
21349diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21350index b7c2849..8633ad8 100644
21351--- a/arch/x86/lib/usercopy_64.c
21352+++ b/arch/x86/lib/usercopy_64.c
21353@@ -42,6 +42,12 @@ long
21354 __strncpy_from_user(char *dst, const char __user *src, long count)
21355 {
21356 long res;
21357+
21358+#ifdef CONFIG_PAX_MEMORY_UDEREF
21359+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21360+ src += PAX_USER_SHADOW_BASE;
21361+#endif
21362+
21363 __do_strncpy_from_user(dst, src, count, res);
21364 return res;
21365 }
21366@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21367 {
21368 long __d0;
21369 might_fault();
21370+
21371+#ifdef CONFIG_PAX_MEMORY_UDEREF
21372+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21373+ addr += PAX_USER_SHADOW_BASE;
21374+#endif
21375+
21376 /* no memory constraint because it doesn't change any memory gcc knows
21377 about */
21378 asm volatile(
21379@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21380 }
21381 EXPORT_SYMBOL(strlen_user);
21382
21383-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21384+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21385 {
21386- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21387- return copy_user_generic((__force void *)to, (__force void *)from, len);
21388- }
21389- return len;
21390+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21391+
21392+#ifdef CONFIG_PAX_MEMORY_UDEREF
21393+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21394+ to += PAX_USER_SHADOW_BASE;
21395+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21396+ from += PAX_USER_SHADOW_BASE;
21397+#endif
21398+
21399+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21400+ }
21401+ return len;
21402 }
21403 EXPORT_SYMBOL(copy_in_user);
21404
21405@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21406 * it is not necessary to optimize tail handling.
21407 */
21408 unsigned long
21409-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21410+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21411 {
21412 char c;
21413 unsigned zero_len;
21414diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21415index d0474ad..36e9257 100644
21416--- a/arch/x86/mm/extable.c
21417+++ b/arch/x86/mm/extable.c
21418@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21419 const struct exception_table_entry *fixup;
21420
21421 #ifdef CONFIG_PNPBIOS
21422- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21423+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21424 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21425 extern u32 pnp_bios_is_utter_crap;
21426 pnp_bios_is_utter_crap = 1;
21427diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21428index 5db0490..13bd09c 100644
21429--- a/arch/x86/mm/fault.c
21430+++ b/arch/x86/mm/fault.c
21431@@ -13,11 +13,18 @@
21432 #include <linux/perf_event.h> /* perf_sw_event */
21433 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21434 #include <linux/prefetch.h> /* prefetchw */
21435+#include <linux/unistd.h>
21436+#include <linux/compiler.h>
21437
21438 #include <asm/traps.h> /* dotraplinkage, ... */
21439 #include <asm/pgalloc.h> /* pgd_*(), ... */
21440 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21441 #include <asm/fixmap.h> /* VSYSCALL_START */
21442+#include <asm/tlbflush.h>
21443+
21444+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21445+#include <asm/stacktrace.h>
21446+#endif
21447
21448 /*
21449 * Page fault error code bits:
21450@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21451 int ret = 0;
21452
21453 /* kprobe_running() needs smp_processor_id() */
21454- if (kprobes_built_in() && !user_mode_vm(regs)) {
21455+ if (kprobes_built_in() && !user_mode(regs)) {
21456 preempt_disable();
21457 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21458 ret = 1;
21459@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21460 return !instr_lo || (instr_lo>>1) == 1;
21461 case 0x00:
21462 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21463- if (probe_kernel_address(instr, opcode))
21464+ if (user_mode(regs)) {
21465+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21466+ return 0;
21467+ } else if (probe_kernel_address(instr, opcode))
21468 return 0;
21469
21470 *prefetch = (instr_lo == 0xF) &&
21471@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21472 while (instr < max_instr) {
21473 unsigned char opcode;
21474
21475- if (probe_kernel_address(instr, opcode))
21476+ if (user_mode(regs)) {
21477+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21478+ break;
21479+ } else if (probe_kernel_address(instr, opcode))
21480 break;
21481
21482 instr++;
21483@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21484 force_sig_info(si_signo, &info, tsk);
21485 }
21486
21487+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21488+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21489+#endif
21490+
21491+#ifdef CONFIG_PAX_EMUTRAMP
21492+static int pax_handle_fetch_fault(struct pt_regs *regs);
21493+#endif
21494+
21495+#ifdef CONFIG_PAX_PAGEEXEC
21496+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21497+{
21498+ pgd_t *pgd;
21499+ pud_t *pud;
21500+ pmd_t *pmd;
21501+
21502+ pgd = pgd_offset(mm, address);
21503+ if (!pgd_present(*pgd))
21504+ return NULL;
21505+ pud = pud_offset(pgd, address);
21506+ if (!pud_present(*pud))
21507+ return NULL;
21508+ pmd = pmd_offset(pud, address);
21509+ if (!pmd_present(*pmd))
21510+ return NULL;
21511+ return pmd;
21512+}
21513+#endif
21514+
21515 DEFINE_SPINLOCK(pgd_lock);
21516 LIST_HEAD(pgd_list);
21517
21518@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21519 for (address = VMALLOC_START & PMD_MASK;
21520 address >= TASK_SIZE && address < FIXADDR_TOP;
21521 address += PMD_SIZE) {
21522+
21523+#ifdef CONFIG_PAX_PER_CPU_PGD
21524+ unsigned long cpu;
21525+#else
21526 struct page *page;
21527+#endif
21528
21529 spin_lock(&pgd_lock);
21530+
21531+#ifdef CONFIG_PAX_PER_CPU_PGD
21532+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21533+ pgd_t *pgd = get_cpu_pgd(cpu);
21534+ pmd_t *ret;
21535+#else
21536 list_for_each_entry(page, &pgd_list, lru) {
21537+ pgd_t *pgd = page_address(page);
21538 spinlock_t *pgt_lock;
21539 pmd_t *ret;
21540
21541@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21542 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21543
21544 spin_lock(pgt_lock);
21545- ret = vmalloc_sync_one(page_address(page), address);
21546+#endif
21547+
21548+ ret = vmalloc_sync_one(pgd, address);
21549+
21550+#ifndef CONFIG_PAX_PER_CPU_PGD
21551 spin_unlock(pgt_lock);
21552+#endif
21553
21554 if (!ret)
21555 break;
21556@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21557 * an interrupt in the middle of a task switch..
21558 */
21559 pgd_paddr = read_cr3();
21560+
21561+#ifdef CONFIG_PAX_PER_CPU_PGD
21562+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21563+#endif
21564+
21565 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21566 if (!pmd_k)
21567 return -1;
21568@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21569 * happen within a race in page table update. In the later
21570 * case just flush:
21571 */
21572+
21573+#ifdef CONFIG_PAX_PER_CPU_PGD
21574+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21575+ pgd = pgd_offset_cpu(smp_processor_id(), address);
21576+#else
21577 pgd = pgd_offset(current->active_mm, address);
21578+#endif
21579+
21580 pgd_ref = pgd_offset_k(address);
21581 if (pgd_none(*pgd_ref))
21582 return -1;
21583@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21584 static int is_errata100(struct pt_regs *regs, unsigned long address)
21585 {
21586 #ifdef CONFIG_X86_64
21587- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21588+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21589 return 1;
21590 #endif
21591 return 0;
21592@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21593 }
21594
21595 static const char nx_warning[] = KERN_CRIT
21596-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21597+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21598
21599 static void
21600 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21601@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21602 if (!oops_may_print())
21603 return;
21604
21605- if (error_code & PF_INSTR) {
21606+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21607 unsigned int level;
21608
21609 pte_t *pte = lookup_address(address, &level);
21610
21611 if (pte && pte_present(*pte) && !pte_exec(*pte))
21612- printk(nx_warning, current_uid());
21613+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21614 }
21615
21616+#ifdef CONFIG_PAX_KERNEXEC
21617+ if (init_mm.start_code <= address && address < init_mm.end_code) {
21618+ if (current->signal->curr_ip)
21619+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21620+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21621+ else
21622+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21623+ current->comm, task_pid_nr(current), current_uid(), current_euid());
21624+ }
21625+#endif
21626+
21627 printk(KERN_ALERT "BUG: unable to handle kernel ");
21628 if (address < PAGE_SIZE)
21629 printk(KERN_CONT "NULL pointer dereference");
21630@@ -739,6 +820,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21631 }
21632 #endif
21633
21634+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21635+ if (pax_is_fetch_fault(regs, error_code, address)) {
21636+
21637+#ifdef CONFIG_PAX_EMUTRAMP
21638+ switch (pax_handle_fetch_fault(regs)) {
21639+ case 2:
21640+ return;
21641+ }
21642+#endif
21643+
21644+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21645+ do_group_exit(SIGKILL);
21646+ }
21647+#endif
21648+
21649 if (unlikely(show_unhandled_signals))
21650 show_signal_msg(regs, error_code, address, tsk);
21651
21652@@ -835,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21653 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21654 printk(KERN_ERR
21655 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21656- tsk->comm, tsk->pid, address);
21657+ tsk->comm, task_pid_nr(tsk), address);
21658 code = BUS_MCEERR_AR;
21659 }
21660 #endif
21661@@ -890,6 +986,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21662 return 1;
21663 }
21664
21665+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21666+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21667+{
21668+ pte_t *pte;
21669+ pmd_t *pmd;
21670+ spinlock_t *ptl;
21671+ unsigned char pte_mask;
21672+
21673+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21674+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
21675+ return 0;
21676+
21677+ /* PaX: it's our fault, let's handle it if we can */
21678+
21679+ /* PaX: take a look at read faults before acquiring any locks */
21680+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21681+ /* instruction fetch attempt from a protected page in user mode */
21682+ up_read(&mm->mmap_sem);
21683+
21684+#ifdef CONFIG_PAX_EMUTRAMP
21685+ switch (pax_handle_fetch_fault(regs)) {
21686+ case 2:
21687+ return 1;
21688+ }
21689+#endif
21690+
21691+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21692+ do_group_exit(SIGKILL);
21693+ }
21694+
21695+ pmd = pax_get_pmd(mm, address);
21696+ if (unlikely(!pmd))
21697+ return 0;
21698+
21699+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21700+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21701+ pte_unmap_unlock(pte, ptl);
21702+ return 0;
21703+ }
21704+
21705+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21706+ /* write attempt to a protected page in user mode */
21707+ pte_unmap_unlock(pte, ptl);
21708+ return 0;
21709+ }
21710+
21711+#ifdef CONFIG_SMP
21712+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21713+#else
21714+ if (likely(address > get_limit(regs->cs)))
21715+#endif
21716+ {
21717+ set_pte(pte, pte_mkread(*pte));
21718+ __flush_tlb_one(address);
21719+ pte_unmap_unlock(pte, ptl);
21720+ up_read(&mm->mmap_sem);
21721+ return 1;
21722+ }
21723+
21724+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21725+
21726+ /*
21727+ * PaX: fill DTLB with user rights and retry
21728+ */
21729+ __asm__ __volatile__ (
21730+ "orb %2,(%1)\n"
21731+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21732+/*
21733+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21734+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21735+ * page fault when examined during a TLB load attempt. this is true not only
21736+ * for PTEs holding a non-present entry but also present entries that will
21737+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21738+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21739+ * for our target pages since their PTEs are simply not in the TLBs at all.
21740+
21741+ * the best thing in omitting it is that we gain around 15-20% speed in the
21742+ * fast path of the page fault handler and can get rid of tracing since we
21743+ * can no longer flush unintended entries.
21744+ */
21745+ "invlpg (%0)\n"
21746+#endif
21747+ __copyuser_seg"testb $0,(%0)\n"
21748+ "xorb %3,(%1)\n"
21749+ :
21750+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21751+ : "memory", "cc");
21752+ pte_unmap_unlock(pte, ptl);
21753+ up_read(&mm->mmap_sem);
21754+ return 1;
21755+}
21756+#endif
21757+
21758 /*
21759 * Handle a spurious fault caused by a stale TLB entry.
21760 *
21761@@ -962,6 +1151,9 @@ int show_unhandled_signals = 1;
21762 static inline int
21763 access_error(unsigned long error_code, struct vm_area_struct *vma)
21764 {
21765+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21766+ return 1;
21767+
21768 if (error_code & PF_WRITE) {
21769 /* write, present and write, not present: */
21770 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21771@@ -995,18 +1187,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21772 {
21773 struct vm_area_struct *vma;
21774 struct task_struct *tsk;
21775- unsigned long address;
21776 struct mm_struct *mm;
21777 int fault;
21778 int write = error_code & PF_WRITE;
21779 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21780 (write ? FAULT_FLAG_WRITE : 0);
21781
21782- tsk = current;
21783- mm = tsk->mm;
21784-
21785 /* Get the faulting address: */
21786- address = read_cr2();
21787+ unsigned long address = read_cr2();
21788+
21789+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21790+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21791+ if (!search_exception_tables(regs->ip)) {
21792+ bad_area_nosemaphore(regs, error_code, address);
21793+ return;
21794+ }
21795+ if (address < PAX_USER_SHADOW_BASE) {
21796+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21797+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21798+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21799+ } else
21800+ address -= PAX_USER_SHADOW_BASE;
21801+ }
21802+#endif
21803+
21804+ tsk = current;
21805+ mm = tsk->mm;
21806
21807 /*
21808 * Detect and handle instructions that would cause a page fault for
21809@@ -1067,7 +1273,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21810 * User-mode registers count as a user access even for any
21811 * potential system fault or CPU buglet:
21812 */
21813- if (user_mode_vm(regs)) {
21814+ if (user_mode(regs)) {
21815 local_irq_enable();
21816 error_code |= PF_USER;
21817 } else {
21818@@ -1122,6 +1328,11 @@ retry:
21819 might_sleep();
21820 }
21821
21822+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21823+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21824+ return;
21825+#endif
21826+
21827 vma = find_vma(mm, address);
21828 if (unlikely(!vma)) {
21829 bad_area(regs, error_code, address);
21830@@ -1133,18 +1344,24 @@ retry:
21831 bad_area(regs, error_code, address);
21832 return;
21833 }
21834- if (error_code & PF_USER) {
21835- /*
21836- * Accessing the stack below %sp is always a bug.
21837- * The large cushion allows instructions like enter
21838- * and pusha to work. ("enter $65535, $31" pushes
21839- * 32 pointers and then decrements %sp by 65535.)
21840- */
21841- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21842- bad_area(regs, error_code, address);
21843- return;
21844- }
21845+ /*
21846+ * Accessing the stack below %sp is always a bug.
21847+ * The large cushion allows instructions like enter
21848+ * and pusha to work. ("enter $65535, $31" pushes
21849+ * 32 pointers and then decrements %sp by 65535.)
21850+ */
21851+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21852+ bad_area(regs, error_code, address);
21853+ return;
21854 }
21855+
21856+#ifdef CONFIG_PAX_SEGMEXEC
21857+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21858+ bad_area(regs, error_code, address);
21859+ return;
21860+ }
21861+#endif
21862+
21863 if (unlikely(expand_stack(vma, address))) {
21864 bad_area(regs, error_code, address);
21865 return;
21866@@ -1199,3 +1416,292 @@ good_area:
21867
21868 up_read(&mm->mmap_sem);
21869 }
21870+
21871+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21872+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21873+{
21874+ struct mm_struct *mm = current->mm;
21875+ unsigned long ip = regs->ip;
21876+
21877+ if (v8086_mode(regs))
21878+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21879+
21880+#ifdef CONFIG_PAX_PAGEEXEC
21881+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21882+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21883+ return true;
21884+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21885+ return true;
21886+ return false;
21887+ }
21888+#endif
21889+
21890+#ifdef CONFIG_PAX_SEGMEXEC
21891+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21892+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21893+ return true;
21894+ return false;
21895+ }
21896+#endif
21897+
21898+ return false;
21899+}
21900+#endif
21901+
21902+#ifdef CONFIG_PAX_EMUTRAMP
21903+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21904+{
21905+ int err;
21906+
21907+ do { /* PaX: libffi trampoline emulation */
21908+ unsigned char mov, jmp;
21909+ unsigned int addr1, addr2;
21910+
21911+#ifdef CONFIG_X86_64
21912+ if ((regs->ip + 9) >> 32)
21913+ break;
21914+#endif
21915+
21916+ err = get_user(mov, (unsigned char __user *)regs->ip);
21917+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21918+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21919+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21920+
21921+ if (err)
21922+ break;
21923+
21924+ if (mov == 0xB8 && jmp == 0xE9) {
21925+ regs->ax = addr1;
21926+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21927+ return 2;
21928+ }
21929+ } while (0);
21930+
21931+ do { /* PaX: gcc trampoline emulation #1 */
21932+ unsigned char mov1, mov2;
21933+ unsigned short jmp;
21934+ unsigned int addr1, addr2;
21935+
21936+#ifdef CONFIG_X86_64
21937+ if ((regs->ip + 11) >> 32)
21938+ break;
21939+#endif
21940+
21941+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21942+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21943+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21944+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21945+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21946+
21947+ if (err)
21948+ break;
21949+
21950+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21951+ regs->cx = addr1;
21952+ regs->ax = addr2;
21953+ regs->ip = addr2;
21954+ return 2;
21955+ }
21956+ } while (0);
21957+
21958+ do { /* PaX: gcc trampoline emulation #2 */
21959+ unsigned char mov, jmp;
21960+ unsigned int addr1, addr2;
21961+
21962+#ifdef CONFIG_X86_64
21963+ if ((regs->ip + 9) >> 32)
21964+ break;
21965+#endif
21966+
21967+ err = get_user(mov, (unsigned char __user *)regs->ip);
21968+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21969+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21970+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21971+
21972+ if (err)
21973+ break;
21974+
21975+ if (mov == 0xB9 && jmp == 0xE9) {
21976+ regs->cx = addr1;
21977+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21978+ return 2;
21979+ }
21980+ } while (0);
21981+
21982+ return 1; /* PaX in action */
21983+}
21984+
21985+#ifdef CONFIG_X86_64
21986+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21987+{
21988+ int err;
21989+
21990+ do { /* PaX: libffi trampoline emulation */
21991+ unsigned short mov1, mov2, jmp1;
21992+ unsigned char stcclc, jmp2;
21993+ unsigned long addr1, addr2;
21994+
21995+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21996+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21997+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21998+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21999+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
22000+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
22001+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
22002+
22003+ if (err)
22004+ break;
22005+
22006+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22007+ regs->r11 = addr1;
22008+ regs->r10 = addr2;
22009+ if (stcclc == 0xF8)
22010+ regs->flags &= ~X86_EFLAGS_CF;
22011+ else
22012+ regs->flags |= X86_EFLAGS_CF;
22013+ regs->ip = addr1;
22014+ return 2;
22015+ }
22016+ } while (0);
22017+
22018+ do { /* PaX: gcc trampoline emulation #1 */
22019+ unsigned short mov1, mov2, jmp1;
22020+ unsigned char jmp2;
22021+ unsigned int addr1;
22022+ unsigned long addr2;
22023+
22024+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22025+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
22026+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
22027+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
22028+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
22029+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
22030+
22031+ if (err)
22032+ break;
22033+
22034+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22035+ regs->r11 = addr1;
22036+ regs->r10 = addr2;
22037+ regs->ip = addr1;
22038+ return 2;
22039+ }
22040+ } while (0);
22041+
22042+ do { /* PaX: gcc trampoline emulation #2 */
22043+ unsigned short mov1, mov2, jmp1;
22044+ unsigned char jmp2;
22045+ unsigned long addr1, addr2;
22046+
22047+ err = get_user(mov1, (unsigned short __user *)regs->ip);
22048+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
22049+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
22050+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
22051+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
22052+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
22053+
22054+ if (err)
22055+ break;
22056+
22057+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
22058+ regs->r11 = addr1;
22059+ regs->r10 = addr2;
22060+ regs->ip = addr1;
22061+ return 2;
22062+ }
22063+ } while (0);
22064+
22065+ return 1; /* PaX in action */
22066+}
22067+#endif
22068+
22069+/*
22070+ * PaX: decide what to do with offenders (regs->ip = fault address)
22071+ *
22072+ * returns 1 when task should be killed
22073+ * 2 when gcc trampoline was detected
22074+ */
22075+static int pax_handle_fetch_fault(struct pt_regs *regs)
22076+{
22077+ if (v8086_mode(regs))
22078+ return 1;
22079+
22080+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
22081+ return 1;
22082+
22083+#ifdef CONFIG_X86_32
22084+ return pax_handle_fetch_fault_32(regs);
22085+#else
22086+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22087+ return pax_handle_fetch_fault_32(regs);
22088+ else
22089+ return pax_handle_fetch_fault_64(regs);
22090+#endif
22091+}
22092+#endif
22093+
22094+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22095+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22096+{
22097+ long i;
22098+
22099+ printk(KERN_ERR "PAX: bytes at PC: ");
22100+ for (i = 0; i < 20; i++) {
22101+ unsigned char c;
22102+ if (get_user(c, (unsigned char __force_user *)pc+i))
22103+ printk(KERN_CONT "?? ");
22104+ else
22105+ printk(KERN_CONT "%02x ", c);
22106+ }
22107+ printk("\n");
22108+
22109+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22110+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
22111+ unsigned long c;
22112+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
22113+#ifdef CONFIG_X86_32
22114+ printk(KERN_CONT "???????? ");
22115+#else
22116+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22117+ printk(KERN_CONT "???????? ???????? ");
22118+ else
22119+ printk(KERN_CONT "???????????????? ");
22120+#endif
22121+ } else {
22122+#ifdef CONFIG_X86_64
22123+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22124+ printk(KERN_CONT "%08x ", (unsigned int)c);
22125+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22126+ } else
22127+#endif
22128+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22129+ }
22130+ }
22131+ printk("\n");
22132+}
22133+#endif
22134+
22135+/**
22136+ * probe_kernel_write(): safely attempt to write to a location
22137+ * @dst: address to write to
22138+ * @src: pointer to the data that shall be written
22139+ * @size: size of the data chunk
22140+ *
22141+ * Safely write to address @dst from the buffer at @src. If a kernel fault
22142+ * happens, handle that and return -EFAULT.
22143+ */
22144+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22145+{
22146+ long ret;
22147+ mm_segment_t old_fs = get_fs();
22148+
22149+ set_fs(KERNEL_DS);
22150+ pagefault_disable();
22151+ pax_open_kernel();
22152+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22153+ pax_close_kernel();
22154+ pagefault_enable();
22155+ set_fs(old_fs);
22156+
22157+ return ret ? -EFAULT : 0;
22158+}
22159diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22160index dd74e46..7d26398 100644
22161--- a/arch/x86/mm/gup.c
22162+++ b/arch/x86/mm/gup.c
22163@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22164 addr = start;
22165 len = (unsigned long) nr_pages << PAGE_SHIFT;
22166 end = start + len;
22167- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22168+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22169 (void __user *)start, len)))
22170 return 0;
22171
22172diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22173index f4f29b1..5cac4fb 100644
22174--- a/arch/x86/mm/highmem_32.c
22175+++ b/arch/x86/mm/highmem_32.c
22176@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22177 idx = type + KM_TYPE_NR*smp_processor_id();
22178 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22179 BUG_ON(!pte_none(*(kmap_pte-idx)));
22180+
22181+ pax_open_kernel();
22182 set_pte(kmap_pte-idx, mk_pte(page, prot));
22183+ pax_close_kernel();
22184+
22185 arch_flush_lazy_mmu_mode();
22186
22187 return (void *)vaddr;
22188diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22189index f581a18..29efd37 100644
22190--- a/arch/x86/mm/hugetlbpage.c
22191+++ b/arch/x86/mm/hugetlbpage.c
22192@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22193 struct hstate *h = hstate_file(file);
22194 struct mm_struct *mm = current->mm;
22195 struct vm_area_struct *vma;
22196- unsigned long start_addr;
22197+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22198+
22199+#ifdef CONFIG_PAX_SEGMEXEC
22200+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22201+ pax_task_size = SEGMEXEC_TASK_SIZE;
22202+#endif
22203+
22204+ pax_task_size -= PAGE_SIZE;
22205
22206 if (len > mm->cached_hole_size) {
22207- start_addr = mm->free_area_cache;
22208+ start_addr = mm->free_area_cache;
22209 } else {
22210- start_addr = TASK_UNMAPPED_BASE;
22211- mm->cached_hole_size = 0;
22212+ start_addr = mm->mmap_base;
22213+ mm->cached_hole_size = 0;
22214 }
22215
22216 full_search:
22217@@ -280,26 +287,27 @@ full_search:
22218
22219 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22220 /* At this point: (!vma || addr < vma->vm_end). */
22221- if (TASK_SIZE - len < addr) {
22222+ if (pax_task_size - len < addr) {
22223 /*
22224 * Start a new search - just in case we missed
22225 * some holes.
22226 */
22227- if (start_addr != TASK_UNMAPPED_BASE) {
22228- start_addr = TASK_UNMAPPED_BASE;
22229+ if (start_addr != mm->mmap_base) {
22230+ start_addr = mm->mmap_base;
22231 mm->cached_hole_size = 0;
22232 goto full_search;
22233 }
22234 return -ENOMEM;
22235 }
22236- if (!vma || addr + len <= vma->vm_start) {
22237- mm->free_area_cache = addr + len;
22238- return addr;
22239- }
22240+ if (check_heap_stack_gap(vma, addr, len))
22241+ break;
22242 if (addr + mm->cached_hole_size < vma->vm_start)
22243 mm->cached_hole_size = vma->vm_start - addr;
22244 addr = ALIGN(vma->vm_end, huge_page_size(h));
22245 }
22246+
22247+ mm->free_area_cache = addr + len;
22248+ return addr;
22249 }
22250
22251 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22252@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22253 {
22254 struct hstate *h = hstate_file(file);
22255 struct mm_struct *mm = current->mm;
22256- struct vm_area_struct *vma, *prev_vma;
22257- unsigned long base = mm->mmap_base, addr = addr0;
22258+ struct vm_area_struct *vma;
22259+ unsigned long base = mm->mmap_base, addr;
22260 unsigned long largest_hole = mm->cached_hole_size;
22261- int first_time = 1;
22262
22263 /* don't allow allocations above current base */
22264 if (mm->free_area_cache > base)
22265@@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22266 largest_hole = 0;
22267 mm->free_area_cache = base;
22268 }
22269-try_again:
22270+
22271 /* make sure it can fit in the remaining address space */
22272 if (mm->free_area_cache < len)
22273 goto fail;
22274
22275 /* either no address requested or can't fit in requested address hole */
22276- addr = (mm->free_area_cache - len) & huge_page_mask(h);
22277+ addr = (mm->free_area_cache - len);
22278 do {
22279+ addr &= huge_page_mask(h);
22280+ vma = find_vma(mm, addr);
22281 /*
22282 * Lookup failure means no vma is above this address,
22283 * i.e. return with success:
22284- */
22285- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22286- return addr;
22287-
22288- /*
22289 * new region fits between prev_vma->vm_end and
22290 * vma->vm_start, use it:
22291 */
22292- if (addr + len <= vma->vm_start &&
22293- (!prev_vma || (addr >= prev_vma->vm_end))) {
22294+ if (check_heap_stack_gap(vma, addr, len)) {
22295 /* remember the address as a hint for next time */
22296- mm->cached_hole_size = largest_hole;
22297- return (mm->free_area_cache = addr);
22298- } else {
22299- /* pull free_area_cache down to the first hole */
22300- if (mm->free_area_cache == vma->vm_end) {
22301- mm->free_area_cache = vma->vm_start;
22302- mm->cached_hole_size = largest_hole;
22303- }
22304+ mm->cached_hole_size = largest_hole;
22305+ return (mm->free_area_cache = addr);
22306+ }
22307+ /* pull free_area_cache down to the first hole */
22308+ if (mm->free_area_cache == vma->vm_end) {
22309+ mm->free_area_cache = vma->vm_start;
22310+ mm->cached_hole_size = largest_hole;
22311 }
22312
22313 /* remember the largest hole we saw so far */
22314 if (addr + largest_hole < vma->vm_start)
22315- largest_hole = vma->vm_start - addr;
22316+ largest_hole = vma->vm_start - addr;
22317
22318 /* try just below the current vma->vm_start */
22319- addr = (vma->vm_start - len) & huge_page_mask(h);
22320- } while (len <= vma->vm_start);
22321+ addr = skip_heap_stack_gap(vma, len);
22322+ } while (!IS_ERR_VALUE(addr));
22323
22324 fail:
22325 /*
22326- * if hint left us with no space for the requested
22327- * mapping then try again:
22328- */
22329- if (first_time) {
22330- mm->free_area_cache = base;
22331- largest_hole = 0;
22332- first_time = 0;
22333- goto try_again;
22334- }
22335- /*
22336 * A failed mmap() very likely causes application failure,
22337 * so fall back to the bottom-up function here. This scenario
22338 * can happen with large stack limits and large mmap()
22339 * allocations.
22340 */
22341- mm->free_area_cache = TASK_UNMAPPED_BASE;
22342+
22343+#ifdef CONFIG_PAX_SEGMEXEC
22344+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22345+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22346+ else
22347+#endif
22348+
22349+ mm->mmap_base = TASK_UNMAPPED_BASE;
22350+
22351+#ifdef CONFIG_PAX_RANDMMAP
22352+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22353+ mm->mmap_base += mm->delta_mmap;
22354+#endif
22355+
22356+ mm->free_area_cache = mm->mmap_base;
22357 mm->cached_hole_size = ~0UL;
22358 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22359 len, pgoff, flags);
22360@@ -386,6 +392,7 @@ fail:
22361 /*
22362 * Restore the topdown base:
22363 */
22364+ mm->mmap_base = base;
22365 mm->free_area_cache = base;
22366 mm->cached_hole_size = ~0UL;
22367
22368@@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22369 struct hstate *h = hstate_file(file);
22370 struct mm_struct *mm = current->mm;
22371 struct vm_area_struct *vma;
22372+ unsigned long pax_task_size = TASK_SIZE;
22373
22374 if (len & ~huge_page_mask(h))
22375 return -EINVAL;
22376- if (len > TASK_SIZE)
22377+
22378+#ifdef CONFIG_PAX_SEGMEXEC
22379+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22380+ pax_task_size = SEGMEXEC_TASK_SIZE;
22381+#endif
22382+
22383+ pax_task_size -= PAGE_SIZE;
22384+
22385+ if (len > pax_task_size)
22386 return -ENOMEM;
22387
22388 if (flags & MAP_FIXED) {
22389@@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22390 if (addr) {
22391 addr = ALIGN(addr, huge_page_size(h));
22392 vma = find_vma(mm, addr);
22393- if (TASK_SIZE - len >= addr &&
22394- (!vma || addr + len <= vma->vm_start))
22395+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22396 return addr;
22397 }
22398 if (mm->get_unmapped_area == arch_get_unmapped_area)
22399diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22400index 87488b9..a55509f 100644
22401--- a/arch/x86/mm/init.c
22402+++ b/arch/x86/mm/init.c
22403@@ -15,6 +15,7 @@
22404 #include <asm/tlbflush.h>
22405 #include <asm/tlb.h>
22406 #include <asm/proto.h>
22407+#include <asm/desc.h>
22408
22409 unsigned long __initdata pgt_buf_start;
22410 unsigned long __meminitdata pgt_buf_end;
22411@@ -31,7 +32,7 @@ int direct_gbpages
22412 static void __init find_early_table_space(unsigned long end, int use_pse,
22413 int use_gbpages)
22414 {
22415- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22416+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22417 phys_addr_t base;
22418
22419 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22420@@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22421 */
22422 int devmem_is_allowed(unsigned long pagenr)
22423 {
22424+#ifdef CONFIG_GRKERNSEC_KMEM
22425+ /* allow BDA */
22426+ if (!pagenr)
22427+ return 1;
22428+ /* allow EBDA */
22429+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22430+ return 1;
22431+#else
22432+ if (!pagenr)
22433+ return 1;
22434+#ifdef CONFIG_VM86
22435+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22436+ return 1;
22437+#endif
22438+#endif
22439+
22440+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22441+ return 1;
22442+#ifdef CONFIG_GRKERNSEC_KMEM
22443+ /* throw out everything else below 1MB */
22444 if (pagenr <= 256)
22445- return 1;
22446+ return 0;
22447+#endif
22448 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22449 return 0;
22450 if (!page_is_ram(pagenr))
22451@@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22452
22453 void free_initmem(void)
22454 {
22455+
22456+#ifdef CONFIG_PAX_KERNEXEC
22457+#ifdef CONFIG_X86_32
22458+ /* PaX: limit KERNEL_CS to actual size */
22459+ unsigned long addr, limit;
22460+ struct desc_struct d;
22461+ int cpu;
22462+
22463+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22464+ limit = (limit - 1UL) >> PAGE_SHIFT;
22465+
22466+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22467+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22468+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22469+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22470+ }
22471+
22472+ /* PaX: make KERNEL_CS read-only */
22473+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22474+ if (!paravirt_enabled())
22475+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22476+/*
22477+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22478+ pgd = pgd_offset_k(addr);
22479+ pud = pud_offset(pgd, addr);
22480+ pmd = pmd_offset(pud, addr);
22481+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22482+ }
22483+*/
22484+#ifdef CONFIG_X86_PAE
22485+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22486+/*
22487+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22488+ pgd = pgd_offset_k(addr);
22489+ pud = pud_offset(pgd, addr);
22490+ pmd = pmd_offset(pud, addr);
22491+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22492+ }
22493+*/
22494+#endif
22495+
22496+#ifdef CONFIG_MODULES
22497+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22498+#endif
22499+
22500+#else
22501+ pgd_t *pgd;
22502+ pud_t *pud;
22503+ pmd_t *pmd;
22504+ unsigned long addr, end;
22505+
22506+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22507+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22508+ pgd = pgd_offset_k(addr);
22509+ pud = pud_offset(pgd, addr);
22510+ pmd = pmd_offset(pud, addr);
22511+ if (!pmd_present(*pmd))
22512+ continue;
22513+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22514+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22515+ else
22516+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22517+ }
22518+
22519+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22520+ end = addr + KERNEL_IMAGE_SIZE;
22521+ for (; addr < end; addr += PMD_SIZE) {
22522+ pgd = pgd_offset_k(addr);
22523+ pud = pud_offset(pgd, addr);
22524+ pmd = pmd_offset(pud, addr);
22525+ if (!pmd_present(*pmd))
22526+ continue;
22527+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22528+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22529+ }
22530+#endif
22531+
22532+ flush_tlb_all();
22533+#endif
22534+
22535 free_init_pages("unused kernel memory",
22536 (unsigned long)(&__init_begin),
22537 (unsigned long)(&__init_end));
22538diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22539index 29f7c6d..b46b35b 100644
22540--- a/arch/x86/mm/init_32.c
22541+++ b/arch/x86/mm/init_32.c
22542@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22543 }
22544
22545 /*
22546- * Creates a middle page table and puts a pointer to it in the
22547- * given global directory entry. This only returns the gd entry
22548- * in non-PAE compilation mode, since the middle layer is folded.
22549- */
22550-static pmd_t * __init one_md_table_init(pgd_t *pgd)
22551-{
22552- pud_t *pud;
22553- pmd_t *pmd_table;
22554-
22555-#ifdef CONFIG_X86_PAE
22556- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22557- if (after_bootmem)
22558- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22559- else
22560- pmd_table = (pmd_t *)alloc_low_page();
22561- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22562- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22563- pud = pud_offset(pgd, 0);
22564- BUG_ON(pmd_table != pmd_offset(pud, 0));
22565-
22566- return pmd_table;
22567- }
22568-#endif
22569- pud = pud_offset(pgd, 0);
22570- pmd_table = pmd_offset(pud, 0);
22571-
22572- return pmd_table;
22573-}
22574-
22575-/*
22576 * Create a page table and place a pointer to it in a middle page
22577 * directory entry:
22578 */
22579@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22580 page_table = (pte_t *)alloc_low_page();
22581
22582 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22583+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22584+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22585+#else
22586 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22587+#endif
22588 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22589 }
22590
22591 return pte_offset_kernel(pmd, 0);
22592 }
22593
22594+static pmd_t * __init one_md_table_init(pgd_t *pgd)
22595+{
22596+ pud_t *pud;
22597+ pmd_t *pmd_table;
22598+
22599+ pud = pud_offset(pgd, 0);
22600+ pmd_table = pmd_offset(pud, 0);
22601+
22602+ return pmd_table;
22603+}
22604+
22605 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22606 {
22607 int pgd_idx = pgd_index(vaddr);
22608@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22609 int pgd_idx, pmd_idx;
22610 unsigned long vaddr;
22611 pgd_t *pgd;
22612+ pud_t *pud;
22613 pmd_t *pmd;
22614 pte_t *pte = NULL;
22615
22616@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22617 pgd = pgd_base + pgd_idx;
22618
22619 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22620- pmd = one_md_table_init(pgd);
22621- pmd = pmd + pmd_index(vaddr);
22622+ pud = pud_offset(pgd, vaddr);
22623+ pmd = pmd_offset(pud, vaddr);
22624+
22625+#ifdef CONFIG_X86_PAE
22626+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22627+#endif
22628+
22629 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22630 pmd++, pmd_idx++) {
22631 pte = page_table_kmap_check(one_page_table_init(pmd),
22632@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22633 }
22634 }
22635
22636-static inline int is_kernel_text(unsigned long addr)
22637+static inline int is_kernel_text(unsigned long start, unsigned long end)
22638 {
22639- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22640- return 1;
22641- return 0;
22642+ if ((start > ktla_ktva((unsigned long)_etext) ||
22643+ end <= ktla_ktva((unsigned long)_stext)) &&
22644+ (start > ktla_ktva((unsigned long)_einittext) ||
22645+ end <= ktla_ktva((unsigned long)_sinittext)) &&
22646+
22647+#ifdef CONFIG_ACPI_SLEEP
22648+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22649+#endif
22650+
22651+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22652+ return 0;
22653+ return 1;
22654 }
22655
22656 /*
22657@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22658 unsigned long last_map_addr = end;
22659 unsigned long start_pfn, end_pfn;
22660 pgd_t *pgd_base = swapper_pg_dir;
22661- int pgd_idx, pmd_idx, pte_ofs;
22662+ unsigned int pgd_idx, pmd_idx, pte_ofs;
22663 unsigned long pfn;
22664 pgd_t *pgd;
22665+ pud_t *pud;
22666 pmd_t *pmd;
22667 pte_t *pte;
22668 unsigned pages_2m, pages_4k;
22669@@ -281,8 +282,13 @@ repeat:
22670 pfn = start_pfn;
22671 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22672 pgd = pgd_base + pgd_idx;
22673- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22674- pmd = one_md_table_init(pgd);
22675+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22676+ pud = pud_offset(pgd, 0);
22677+ pmd = pmd_offset(pud, 0);
22678+
22679+#ifdef CONFIG_X86_PAE
22680+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22681+#endif
22682
22683 if (pfn >= end_pfn)
22684 continue;
22685@@ -294,14 +300,13 @@ repeat:
22686 #endif
22687 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22688 pmd++, pmd_idx++) {
22689- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22690+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22691
22692 /*
22693 * Map with big pages if possible, otherwise
22694 * create normal page tables:
22695 */
22696 if (use_pse) {
22697- unsigned int addr2;
22698 pgprot_t prot = PAGE_KERNEL_LARGE;
22699 /*
22700 * first pass will use the same initial
22701@@ -311,11 +316,7 @@ repeat:
22702 __pgprot(PTE_IDENT_ATTR |
22703 _PAGE_PSE);
22704
22705- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22706- PAGE_OFFSET + PAGE_SIZE-1;
22707-
22708- if (is_kernel_text(addr) ||
22709- is_kernel_text(addr2))
22710+ if (is_kernel_text(address, address + PMD_SIZE))
22711 prot = PAGE_KERNEL_LARGE_EXEC;
22712
22713 pages_2m++;
22714@@ -332,7 +333,7 @@ repeat:
22715 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22716 pte += pte_ofs;
22717 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22718- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22719+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22720 pgprot_t prot = PAGE_KERNEL;
22721 /*
22722 * first pass will use the same initial
22723@@ -340,7 +341,7 @@ repeat:
22724 */
22725 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22726
22727- if (is_kernel_text(addr))
22728+ if (is_kernel_text(address, address + PAGE_SIZE))
22729 prot = PAGE_KERNEL_EXEC;
22730
22731 pages_4k++;
22732@@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22733
22734 pud = pud_offset(pgd, va);
22735 pmd = pmd_offset(pud, va);
22736- if (!pmd_present(*pmd))
22737+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
22738 break;
22739
22740 pte = pte_offset_kernel(pmd, va);
22741@@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22742
22743 static void __init pagetable_init(void)
22744 {
22745- pgd_t *pgd_base = swapper_pg_dir;
22746-
22747- permanent_kmaps_init(pgd_base);
22748+ permanent_kmaps_init(swapper_pg_dir);
22749 }
22750
22751-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22752+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22753 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22754
22755 /* user-defined highmem size */
22756@@ -757,6 +756,12 @@ void __init mem_init(void)
22757
22758 pci_iommu_alloc();
22759
22760+#ifdef CONFIG_PAX_PER_CPU_PGD
22761+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22762+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22763+ KERNEL_PGD_PTRS);
22764+#endif
22765+
22766 #ifdef CONFIG_FLATMEM
22767 BUG_ON(!mem_map);
22768 #endif
22769@@ -774,7 +779,7 @@ void __init mem_init(void)
22770 set_highmem_pages_init();
22771
22772 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22773- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22774+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22775 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22776
22777 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22778@@ -815,10 +820,10 @@ void __init mem_init(void)
22779 ((unsigned long)&__init_end -
22780 (unsigned long)&__init_begin) >> 10,
22781
22782- (unsigned long)&_etext, (unsigned long)&_edata,
22783- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22784+ (unsigned long)&_sdata, (unsigned long)&_edata,
22785+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22786
22787- (unsigned long)&_text, (unsigned long)&_etext,
22788+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22789 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22790
22791 /*
22792@@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22793 if (!kernel_set_to_readonly)
22794 return;
22795
22796+ start = ktla_ktva(start);
22797 pr_debug("Set kernel text: %lx - %lx for read write\n",
22798 start, start+size);
22799
22800@@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22801 if (!kernel_set_to_readonly)
22802 return;
22803
22804+ start = ktla_ktva(start);
22805 pr_debug("Set kernel text: %lx - %lx for read only\n",
22806 start, start+size);
22807
22808@@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22809 unsigned long start = PFN_ALIGN(_text);
22810 unsigned long size = PFN_ALIGN(_etext) - start;
22811
22812+ start = ktla_ktva(start);
22813 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22814 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22815 size >> 10);
22816diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22817index bbaaa00..16dffad 100644
22818--- a/arch/x86/mm/init_64.c
22819+++ b/arch/x86/mm/init_64.c
22820@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22821 * around without checking the pgd every time.
22822 */
22823
22824-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22825+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22826 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22827
22828 int force_personality32;
22829@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22830
22831 for (address = start; address <= end; address += PGDIR_SIZE) {
22832 const pgd_t *pgd_ref = pgd_offset_k(address);
22833+
22834+#ifdef CONFIG_PAX_PER_CPU_PGD
22835+ unsigned long cpu;
22836+#else
22837 struct page *page;
22838+#endif
22839
22840 if (pgd_none(*pgd_ref))
22841 continue;
22842
22843 spin_lock(&pgd_lock);
22844+
22845+#ifdef CONFIG_PAX_PER_CPU_PGD
22846+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22847+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
22848+#else
22849 list_for_each_entry(page, &pgd_list, lru) {
22850 pgd_t *pgd;
22851 spinlock_t *pgt_lock;
22852@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22853 /* the pgt_lock only for Xen */
22854 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22855 spin_lock(pgt_lock);
22856+#endif
22857
22858 if (pgd_none(*pgd))
22859 set_pgd(pgd, *pgd_ref);
22860@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22861 BUG_ON(pgd_page_vaddr(*pgd)
22862 != pgd_page_vaddr(*pgd_ref));
22863
22864+#ifndef CONFIG_PAX_PER_CPU_PGD
22865 spin_unlock(pgt_lock);
22866+#endif
22867+
22868 }
22869 spin_unlock(&pgd_lock);
22870 }
22871@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22872 pmd = fill_pmd(pud, vaddr);
22873 pte = fill_pte(pmd, vaddr);
22874
22875+ pax_open_kernel();
22876 set_pte(pte, new_pte);
22877+ pax_close_kernel();
22878
22879 /*
22880 * It's enough to flush this one mapping.
22881@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22882 pgd = pgd_offset_k((unsigned long)__va(phys));
22883 if (pgd_none(*pgd)) {
22884 pud = (pud_t *) spp_getpage();
22885- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22886- _PAGE_USER));
22887+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22888 }
22889 pud = pud_offset(pgd, (unsigned long)__va(phys));
22890 if (pud_none(*pud)) {
22891 pmd = (pmd_t *) spp_getpage();
22892- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22893- _PAGE_USER));
22894+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22895 }
22896 pmd = pmd_offset(pud, phys);
22897 BUG_ON(!pmd_none(*pmd));
22898@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22899 if (pfn >= pgt_buf_top)
22900 panic("alloc_low_page: ran out of memory");
22901
22902- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22903+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22904 clear_page(adr);
22905 *phys = pfn * PAGE_SIZE;
22906 return adr;
22907@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22908
22909 phys = __pa(virt);
22910 left = phys & (PAGE_SIZE - 1);
22911- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22912+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22913 adr = (void *)(((unsigned long)adr) | left);
22914
22915 return adr;
22916@@ -693,6 +707,12 @@ void __init mem_init(void)
22917
22918 pci_iommu_alloc();
22919
22920+#ifdef CONFIG_PAX_PER_CPU_PGD
22921+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22922+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22923+ KERNEL_PGD_PTRS);
22924+#endif
22925+
22926 /* clear_bss() already clear the empty_zero_page */
22927
22928 reservedpages = 0;
22929@@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22930 static struct vm_area_struct gate_vma = {
22931 .vm_start = VSYSCALL_START,
22932 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22933- .vm_page_prot = PAGE_READONLY_EXEC,
22934- .vm_flags = VM_READ | VM_EXEC
22935+ .vm_page_prot = PAGE_READONLY,
22936+ .vm_flags = VM_READ
22937 };
22938
22939 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22940@@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22941
22942 const char *arch_vma_name(struct vm_area_struct *vma)
22943 {
22944- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22945+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22946 return "[vdso]";
22947 if (vma == &gate_vma)
22948 return "[vsyscall]";
22949diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22950index 7b179b4..6bd1777 100644
22951--- a/arch/x86/mm/iomap_32.c
22952+++ b/arch/x86/mm/iomap_32.c
22953@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22954 type = kmap_atomic_idx_push();
22955 idx = type + KM_TYPE_NR * smp_processor_id();
22956 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22957+
22958+ pax_open_kernel();
22959 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22960+ pax_close_kernel();
22961+
22962 arch_flush_lazy_mmu_mode();
22963
22964 return (void *)vaddr;
22965diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22966index be1ef57..9680edc 100644
22967--- a/arch/x86/mm/ioremap.c
22968+++ b/arch/x86/mm/ioremap.c
22969@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22970 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22971 int is_ram = page_is_ram(pfn);
22972
22973- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22974+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22975 return NULL;
22976 WARN_ON_ONCE(is_ram);
22977 }
22978@@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str)
22979 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22980
22981 static __initdata int after_paging_init;
22982-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22983+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22984
22985 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22986 {
22987@@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
22988 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22989
22990 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22991- memset(bm_pte, 0, sizeof(bm_pte));
22992- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22993+ pmd_populate_user(&init_mm, pmd, bm_pte);
22994
22995 /*
22996 * The boot-ioremap range spans multiple pmds, for which
22997diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
22998index d87dd6d..bf3fa66 100644
22999--- a/arch/x86/mm/kmemcheck/kmemcheck.c
23000+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
23001@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
23002 * memory (e.g. tracked pages)? For now, we need this to avoid
23003 * invoking kmemcheck for PnP BIOS calls.
23004 */
23005- if (regs->flags & X86_VM_MASK)
23006+ if (v8086_mode(regs))
23007 return false;
23008- if (regs->cs != __KERNEL_CS)
23009+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
23010 return false;
23011
23012 pte = kmemcheck_pte_lookup(address);
23013diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
23014index 845df68..1d8d29f 100644
23015--- a/arch/x86/mm/mmap.c
23016+++ b/arch/x86/mm/mmap.c
23017@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
23018 * Leave an at least ~128 MB hole with possible stack randomization.
23019 */
23020 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
23021-#define MAX_GAP (TASK_SIZE/6*5)
23022+#define MAX_GAP (pax_task_size/6*5)
23023
23024 static int mmap_is_legacy(void)
23025 {
23026@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
23027 return rnd << PAGE_SHIFT;
23028 }
23029
23030-static unsigned long mmap_base(void)
23031+static unsigned long mmap_base(struct mm_struct *mm)
23032 {
23033 unsigned long gap = rlimit(RLIMIT_STACK);
23034+ unsigned long pax_task_size = TASK_SIZE;
23035+
23036+#ifdef CONFIG_PAX_SEGMEXEC
23037+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23038+ pax_task_size = SEGMEXEC_TASK_SIZE;
23039+#endif
23040
23041 if (gap < MIN_GAP)
23042 gap = MIN_GAP;
23043 else if (gap > MAX_GAP)
23044 gap = MAX_GAP;
23045
23046- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
23047+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
23048 }
23049
23050 /*
23051 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
23052 * does, but not when emulating X86_32
23053 */
23054-static unsigned long mmap_legacy_base(void)
23055+static unsigned long mmap_legacy_base(struct mm_struct *mm)
23056 {
23057- if (mmap_is_ia32())
23058+ if (mmap_is_ia32()) {
23059+
23060+#ifdef CONFIG_PAX_SEGMEXEC
23061+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23062+ return SEGMEXEC_TASK_UNMAPPED_BASE;
23063+ else
23064+#endif
23065+
23066 return TASK_UNMAPPED_BASE;
23067- else
23068+ } else
23069 return TASK_UNMAPPED_BASE + mmap_rnd();
23070 }
23071
23072@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
23073 void arch_pick_mmap_layout(struct mm_struct *mm)
23074 {
23075 if (mmap_is_legacy()) {
23076- mm->mmap_base = mmap_legacy_base();
23077+ mm->mmap_base = mmap_legacy_base(mm);
23078+
23079+#ifdef CONFIG_PAX_RANDMMAP
23080+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23081+ mm->mmap_base += mm->delta_mmap;
23082+#endif
23083+
23084 mm->get_unmapped_area = arch_get_unmapped_area;
23085 mm->unmap_area = arch_unmap_area;
23086 } else {
23087- mm->mmap_base = mmap_base();
23088+ mm->mmap_base = mmap_base(mm);
23089+
23090+#ifdef CONFIG_PAX_RANDMMAP
23091+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23092+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23093+#endif
23094+
23095 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23096 mm->unmap_area = arch_unmap_area_topdown;
23097 }
23098diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23099index de54b9b..799051e 100644
23100--- a/arch/x86/mm/mmio-mod.c
23101+++ b/arch/x86/mm/mmio-mod.c
23102@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23103 break;
23104 default:
23105 {
23106- unsigned char *ip = (unsigned char *)instptr;
23107+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23108 my_trace->opcode = MMIO_UNKNOWN_OP;
23109 my_trace->width = 0;
23110 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23111@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23112 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23113 void __iomem *addr)
23114 {
23115- static atomic_t next_id;
23116+ static atomic_unchecked_t next_id;
23117 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23118 /* These are page-unaligned. */
23119 struct mmiotrace_map map = {
23120@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23121 .private = trace
23122 },
23123 .phys = offset,
23124- .id = atomic_inc_return(&next_id)
23125+ .id = atomic_inc_return_unchecked(&next_id)
23126 };
23127 map.map_id = trace->id;
23128
23129diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23130index b008656..773eac2 100644
23131--- a/arch/x86/mm/pageattr-test.c
23132+++ b/arch/x86/mm/pageattr-test.c
23133@@ -36,7 +36,7 @@ enum {
23134
23135 static int pte_testbit(pte_t pte)
23136 {
23137- return pte_flags(pte) & _PAGE_UNUSED1;
23138+ return pte_flags(pte) & _PAGE_CPA_TEST;
23139 }
23140
23141 struct split_state {
23142diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23143index f9e5267..6f6e27f 100644
23144--- a/arch/x86/mm/pageattr.c
23145+++ b/arch/x86/mm/pageattr.c
23146@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23147 */
23148 #ifdef CONFIG_PCI_BIOS
23149 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23150- pgprot_val(forbidden) |= _PAGE_NX;
23151+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23152 #endif
23153
23154 /*
23155@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23156 * Does not cover __inittext since that is gone later on. On
23157 * 64bit we do not enforce !NX on the low mapping
23158 */
23159- if (within(address, (unsigned long)_text, (unsigned long)_etext))
23160- pgprot_val(forbidden) |= _PAGE_NX;
23161+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23162+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23163
23164+#ifdef CONFIG_DEBUG_RODATA
23165 /*
23166 * The .rodata section needs to be read-only. Using the pfn
23167 * catches all aliases.
23168@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23169 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23170 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23171 pgprot_val(forbidden) |= _PAGE_RW;
23172+#endif
23173
23174 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23175 /*
23176@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23177 }
23178 #endif
23179
23180+#ifdef CONFIG_PAX_KERNEXEC
23181+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23182+ pgprot_val(forbidden) |= _PAGE_RW;
23183+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23184+ }
23185+#endif
23186+
23187 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23188
23189 return prot;
23190@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23191 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23192 {
23193 /* change init_mm */
23194+ pax_open_kernel();
23195 set_pte_atomic(kpte, pte);
23196+
23197 #ifdef CONFIG_X86_32
23198 if (!SHARED_KERNEL_PMD) {
23199+
23200+#ifdef CONFIG_PAX_PER_CPU_PGD
23201+ unsigned long cpu;
23202+#else
23203 struct page *page;
23204+#endif
23205
23206+#ifdef CONFIG_PAX_PER_CPU_PGD
23207+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23208+ pgd_t *pgd = get_cpu_pgd(cpu);
23209+#else
23210 list_for_each_entry(page, &pgd_list, lru) {
23211- pgd_t *pgd;
23212+ pgd_t *pgd = (pgd_t *)page_address(page);
23213+#endif
23214+
23215 pud_t *pud;
23216 pmd_t *pmd;
23217
23218- pgd = (pgd_t *)page_address(page) + pgd_index(address);
23219+ pgd += pgd_index(address);
23220 pud = pud_offset(pgd, address);
23221 pmd = pmd_offset(pud, address);
23222 set_pte_atomic((pte_t *)pmd, pte);
23223 }
23224 }
23225 #endif
23226+ pax_close_kernel();
23227 }
23228
23229 static int
23230diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23231index f6ff57b..481690f 100644
23232--- a/arch/x86/mm/pat.c
23233+++ b/arch/x86/mm/pat.c
23234@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23235
23236 if (!entry) {
23237 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23238- current->comm, current->pid, start, end);
23239+ current->comm, task_pid_nr(current), start, end);
23240 return -EINVAL;
23241 }
23242
23243@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23244 while (cursor < to) {
23245 if (!devmem_is_allowed(pfn)) {
23246 printk(KERN_INFO
23247- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23248- current->comm, from, to);
23249+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23250+ current->comm, from, to, cursor);
23251 return 0;
23252 }
23253 cursor += PAGE_SIZE;
23254@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23255 printk(KERN_INFO
23256 "%s:%d ioremap_change_attr failed %s "
23257 "for %Lx-%Lx\n",
23258- current->comm, current->pid,
23259+ current->comm, task_pid_nr(current),
23260 cattr_name(flags),
23261 base, (unsigned long long)(base + size));
23262 return -EINVAL;
23263@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23264 if (want_flags != flags) {
23265 printk(KERN_WARNING
23266 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23267- current->comm, current->pid,
23268+ current->comm, task_pid_nr(current),
23269 cattr_name(want_flags),
23270 (unsigned long long)paddr,
23271 (unsigned long long)(paddr + size),
23272@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23273 free_memtype(paddr, paddr + size);
23274 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23275 " for %Lx-%Lx, got %s\n",
23276- current->comm, current->pid,
23277+ current->comm, task_pid_nr(current),
23278 cattr_name(want_flags),
23279 (unsigned long long)paddr,
23280 (unsigned long long)(paddr + size),
23281diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23282index 9f0614d..92ae64a 100644
23283--- a/arch/x86/mm/pf_in.c
23284+++ b/arch/x86/mm/pf_in.c
23285@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23286 int i;
23287 enum reason_type rv = OTHERS;
23288
23289- p = (unsigned char *)ins_addr;
23290+ p = (unsigned char *)ktla_ktva(ins_addr);
23291 p += skip_prefix(p, &prf);
23292 p += get_opcode(p, &opcode);
23293
23294@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23295 struct prefix_bits prf;
23296 int i;
23297
23298- p = (unsigned char *)ins_addr;
23299+ p = (unsigned char *)ktla_ktva(ins_addr);
23300 p += skip_prefix(p, &prf);
23301 p += get_opcode(p, &opcode);
23302
23303@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23304 struct prefix_bits prf;
23305 int i;
23306
23307- p = (unsigned char *)ins_addr;
23308+ p = (unsigned char *)ktla_ktva(ins_addr);
23309 p += skip_prefix(p, &prf);
23310 p += get_opcode(p, &opcode);
23311
23312@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23313 struct prefix_bits prf;
23314 int i;
23315
23316- p = (unsigned char *)ins_addr;
23317+ p = (unsigned char *)ktla_ktva(ins_addr);
23318 p += skip_prefix(p, &prf);
23319 p += get_opcode(p, &opcode);
23320 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23321@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23322 struct prefix_bits prf;
23323 int i;
23324
23325- p = (unsigned char *)ins_addr;
23326+ p = (unsigned char *)ktla_ktva(ins_addr);
23327 p += skip_prefix(p, &prf);
23328 p += get_opcode(p, &opcode);
23329 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23330diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23331index 8573b83..6372501 100644
23332--- a/arch/x86/mm/pgtable.c
23333+++ b/arch/x86/mm/pgtable.c
23334@@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23335 list_del(&page->lru);
23336 }
23337
23338-#define UNSHARED_PTRS_PER_PGD \
23339- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23340+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23341+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23342
23343+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23344+{
23345+ while (count--)
23346+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23347+}
23348+#endif
23349
23350+#ifdef CONFIG_PAX_PER_CPU_PGD
23351+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23352+{
23353+ while (count--)
23354+
23355+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23356+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23357+#else
23358+ *dst++ = *src++;
23359+#endif
23360+
23361+}
23362+#endif
23363+
23364+#ifdef CONFIG_X86_64
23365+#define pxd_t pud_t
23366+#define pyd_t pgd_t
23367+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23368+#define pxd_free(mm, pud) pud_free((mm), (pud))
23369+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23370+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23371+#define PYD_SIZE PGDIR_SIZE
23372+#else
23373+#define pxd_t pmd_t
23374+#define pyd_t pud_t
23375+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23376+#define pxd_free(mm, pud) pmd_free((mm), (pud))
23377+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23378+#define pyd_offset(mm ,address) pud_offset((mm), (address))
23379+#define PYD_SIZE PUD_SIZE
23380+#endif
23381+
23382+#ifdef CONFIG_PAX_PER_CPU_PGD
23383+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23384+static inline void pgd_dtor(pgd_t *pgd) {}
23385+#else
23386 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23387 {
23388 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23389@@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23390 pgd_list_del(pgd);
23391 spin_unlock(&pgd_lock);
23392 }
23393+#endif
23394
23395 /*
23396 * List of all pgd's needed for non-PAE so it can invalidate entries
23397@@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23398 * -- wli
23399 */
23400
23401-#ifdef CONFIG_X86_PAE
23402+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23403 /*
23404 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23405 * updating the top-level pagetable entries to guarantee the
23406@@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23407 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23408 * and initialize the kernel pmds here.
23409 */
23410-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23411+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23412
23413 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23414 {
23415@@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23416 */
23417 flush_tlb_mm(mm);
23418 }
23419+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23420+#define PREALLOCATED_PXDS USER_PGD_PTRS
23421 #else /* !CONFIG_X86_PAE */
23422
23423 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23424-#define PREALLOCATED_PMDS 0
23425+#define PREALLOCATED_PXDS 0
23426
23427 #endif /* CONFIG_X86_PAE */
23428
23429-static void free_pmds(pmd_t *pmds[])
23430+static void free_pxds(pxd_t *pxds[])
23431 {
23432 int i;
23433
23434- for(i = 0; i < PREALLOCATED_PMDS; i++)
23435- if (pmds[i])
23436- free_page((unsigned long)pmds[i]);
23437+ for(i = 0; i < PREALLOCATED_PXDS; i++)
23438+ if (pxds[i])
23439+ free_page((unsigned long)pxds[i]);
23440 }
23441
23442-static int preallocate_pmds(pmd_t *pmds[])
23443+static int preallocate_pxds(pxd_t *pxds[])
23444 {
23445 int i;
23446 bool failed = false;
23447
23448- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23449- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23450- if (pmd == NULL)
23451+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23452+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23453+ if (pxd == NULL)
23454 failed = true;
23455- pmds[i] = pmd;
23456+ pxds[i] = pxd;
23457 }
23458
23459 if (failed) {
23460- free_pmds(pmds);
23461+ free_pxds(pxds);
23462 return -ENOMEM;
23463 }
23464
23465@@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23466 * preallocate which never got a corresponding vma will need to be
23467 * freed manually.
23468 */
23469-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23470+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23471 {
23472 int i;
23473
23474- for(i = 0; i < PREALLOCATED_PMDS; i++) {
23475+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
23476 pgd_t pgd = pgdp[i];
23477
23478 if (pgd_val(pgd) != 0) {
23479- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23480+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23481
23482- pgdp[i] = native_make_pgd(0);
23483+ set_pgd(pgdp + i, native_make_pgd(0));
23484
23485- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23486- pmd_free(mm, pmd);
23487+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23488+ pxd_free(mm, pxd);
23489 }
23490 }
23491 }
23492
23493-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23494+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23495 {
23496- pud_t *pud;
23497+ pyd_t *pyd;
23498 unsigned long addr;
23499 int i;
23500
23501- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23502+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23503 return;
23504
23505- pud = pud_offset(pgd, 0);
23506+#ifdef CONFIG_X86_64
23507+ pyd = pyd_offset(mm, 0L);
23508+#else
23509+ pyd = pyd_offset(pgd, 0L);
23510+#endif
23511
23512- for (addr = i = 0; i < PREALLOCATED_PMDS;
23513- i++, pud++, addr += PUD_SIZE) {
23514- pmd_t *pmd = pmds[i];
23515+ for (addr = i = 0; i < PREALLOCATED_PXDS;
23516+ i++, pyd++, addr += PYD_SIZE) {
23517+ pxd_t *pxd = pxds[i];
23518
23519 if (i >= KERNEL_PGD_BOUNDARY)
23520- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23521- sizeof(pmd_t) * PTRS_PER_PMD);
23522+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23523+ sizeof(pxd_t) * PTRS_PER_PMD);
23524
23525- pud_populate(mm, pud, pmd);
23526+ pyd_populate(mm, pyd, pxd);
23527 }
23528 }
23529
23530 pgd_t *pgd_alloc(struct mm_struct *mm)
23531 {
23532 pgd_t *pgd;
23533- pmd_t *pmds[PREALLOCATED_PMDS];
23534+ pxd_t *pxds[PREALLOCATED_PXDS];
23535
23536 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23537
23538@@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23539
23540 mm->pgd = pgd;
23541
23542- if (preallocate_pmds(pmds) != 0)
23543+ if (preallocate_pxds(pxds) != 0)
23544 goto out_free_pgd;
23545
23546 if (paravirt_pgd_alloc(mm) != 0)
23547- goto out_free_pmds;
23548+ goto out_free_pxds;
23549
23550 /*
23551 * Make sure that pre-populating the pmds is atomic with
23552@@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23553 spin_lock(&pgd_lock);
23554
23555 pgd_ctor(mm, pgd);
23556- pgd_prepopulate_pmd(mm, pgd, pmds);
23557+ pgd_prepopulate_pxd(mm, pgd, pxds);
23558
23559 spin_unlock(&pgd_lock);
23560
23561 return pgd;
23562
23563-out_free_pmds:
23564- free_pmds(pmds);
23565+out_free_pxds:
23566+ free_pxds(pxds);
23567 out_free_pgd:
23568 free_page((unsigned long)pgd);
23569 out:
23570@@ -295,7 +344,7 @@ out:
23571
23572 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23573 {
23574- pgd_mop_up_pmds(mm, pgd);
23575+ pgd_mop_up_pxds(mm, pgd);
23576 pgd_dtor(pgd);
23577 paravirt_pgd_free(mm, pgd);
23578 free_page((unsigned long)pgd);
23579diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23580index cac7184..09a39fa 100644
23581--- a/arch/x86/mm/pgtable_32.c
23582+++ b/arch/x86/mm/pgtable_32.c
23583@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23584 return;
23585 }
23586 pte = pte_offset_kernel(pmd, vaddr);
23587+
23588+ pax_open_kernel();
23589 if (pte_val(pteval))
23590 set_pte_at(&init_mm, vaddr, pte, pteval);
23591 else
23592 pte_clear(&init_mm, vaddr, pte);
23593+ pax_close_kernel();
23594
23595 /*
23596 * It's enough to flush this one mapping.
23597diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23598index 410531d..0f16030 100644
23599--- a/arch/x86/mm/setup_nx.c
23600+++ b/arch/x86/mm/setup_nx.c
23601@@ -5,8 +5,10 @@
23602 #include <asm/pgtable.h>
23603 #include <asm/proto.h>
23604
23605+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23606 static int disable_nx __cpuinitdata;
23607
23608+#ifndef CONFIG_PAX_PAGEEXEC
23609 /*
23610 * noexec = on|off
23611 *
23612@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23613 return 0;
23614 }
23615 early_param("noexec", noexec_setup);
23616+#endif
23617+
23618+#endif
23619
23620 void __cpuinit x86_configure_nx(void)
23621 {
23622+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23623 if (cpu_has_nx && !disable_nx)
23624 __supported_pte_mask |= _PAGE_NX;
23625 else
23626+#endif
23627 __supported_pte_mask &= ~_PAGE_NX;
23628 }
23629
23630diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23631index d6c0418..06a0ad5 100644
23632--- a/arch/x86/mm/tlb.c
23633+++ b/arch/x86/mm/tlb.c
23634@@ -65,7 +65,11 @@ void leave_mm(int cpu)
23635 BUG();
23636 cpumask_clear_cpu(cpu,
23637 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23638+
23639+#ifndef CONFIG_PAX_PER_CPU_PGD
23640 load_cr3(swapper_pg_dir);
23641+#endif
23642+
23643 }
23644 EXPORT_SYMBOL_GPL(leave_mm);
23645
23646diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23647index 6687022..ceabcfa 100644
23648--- a/arch/x86/net/bpf_jit.S
23649+++ b/arch/x86/net/bpf_jit.S
23650@@ -9,6 +9,7 @@
23651 */
23652 #include <linux/linkage.h>
23653 #include <asm/dwarf2.h>
23654+#include <asm/alternative-asm.h>
23655
23656 /*
23657 * Calling convention :
23658@@ -35,6 +36,7 @@ sk_load_word:
23659 jle bpf_slow_path_word
23660 mov (SKBDATA,%rsi),%eax
23661 bswap %eax /* ntohl() */
23662+ pax_force_retaddr
23663 ret
23664
23665
23666@@ -53,6 +55,7 @@ sk_load_half:
23667 jle bpf_slow_path_half
23668 movzwl (SKBDATA,%rsi),%eax
23669 rol $8,%ax # ntohs()
23670+ pax_force_retaddr
23671 ret
23672
23673 sk_load_byte_ind:
23674@@ -66,6 +69,7 @@ sk_load_byte:
23675 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23676 jle bpf_slow_path_byte
23677 movzbl (SKBDATA,%rsi),%eax
23678+ pax_force_retaddr
23679 ret
23680
23681 /**
23682@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23683 movzbl (SKBDATA,%rsi),%ebx
23684 and $15,%bl
23685 shl $2,%bl
23686+ pax_force_retaddr
23687 ret
23688 CFI_ENDPROC
23689 ENDPROC(sk_load_byte_msh)
23690@@ -91,6 +96,7 @@ bpf_error:
23691 xor %eax,%eax
23692 mov -8(%rbp),%rbx
23693 leaveq
23694+ pax_force_retaddr
23695 ret
23696
23697 /* rsi contains offset and can be scratched */
23698@@ -113,6 +119,7 @@ bpf_slow_path_word:
23699 js bpf_error
23700 mov -12(%rbp),%eax
23701 bswap %eax
23702+ pax_force_retaddr
23703 ret
23704
23705 bpf_slow_path_half:
23706@@ -121,12 +128,14 @@ bpf_slow_path_half:
23707 mov -12(%rbp),%ax
23708 rol $8,%ax
23709 movzwl %ax,%eax
23710+ pax_force_retaddr
23711 ret
23712
23713 bpf_slow_path_byte:
23714 bpf_slow_path_common(1)
23715 js bpf_error
23716 movzbl -12(%rbp),%eax
23717+ pax_force_retaddr
23718 ret
23719
23720 bpf_slow_path_byte_msh:
23721@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23722 and $15,%al
23723 shl $2,%al
23724 xchg %eax,%ebx
23725+ pax_force_retaddr
23726 ret
23727diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23728index 7b65f75..63097f6 100644
23729--- a/arch/x86/net/bpf_jit_comp.c
23730+++ b/arch/x86/net/bpf_jit_comp.c
23731@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23732 set_fs(old_fs);
23733 }
23734
23735+struct bpf_jit_work {
23736+ struct work_struct work;
23737+ void *image;
23738+};
23739
23740 void bpf_jit_compile(struct sk_filter *fp)
23741 {
23742@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23743 if (addrs == NULL)
23744 return;
23745
23746+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23747+ if (!fp->work)
23748+ goto out;
23749+
23750 /* Before first pass, make a rough estimation of addrs[]
23751 * each bpf instruction is translated to less than 64 bytes
23752 */
23753@@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23754 if (image) {
23755 if (unlikely(proglen + ilen > oldproglen)) {
23756 pr_err("bpb_jit_compile fatal error\n");
23757- kfree(addrs);
23758- module_free(NULL, image);
23759- return;
23760+ module_free_exec(NULL, image);
23761+ goto out;
23762 }
23763+ pax_open_kernel();
23764 memcpy(image + proglen, temp, ilen);
23765+ pax_close_kernel();
23766 }
23767 proglen += ilen;
23768 addrs[i] = proglen;
23769@@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23770 break;
23771 }
23772 if (proglen == oldproglen) {
23773- image = module_alloc(max_t(unsigned int,
23774+ image = module_alloc_exec(max_t(unsigned int,
23775 proglen,
23776 sizeof(struct work_struct)));
23777 if (!image)
23778@@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23779 fp->bpf_func = (void *)image;
23780 }
23781 out:
23782+ kfree(fp->work);
23783 kfree(addrs);
23784 return;
23785 }
23786
23787 static void jit_free_defer(struct work_struct *arg)
23788 {
23789- module_free(NULL, arg);
23790+ module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23791+ kfree(arg);
23792 }
23793
23794 /* run from softirq, we must use a work_struct to call
23795- * module_free() from process context
23796+ * module_free_exec() from process context
23797 */
23798 void bpf_jit_free(struct sk_filter *fp)
23799 {
23800 if (fp->bpf_func != sk_run_filter) {
23801- struct work_struct *work = (struct work_struct *)fp->bpf_func;
23802+ struct work_struct *work = &fp->work->work;
23803
23804 INIT_WORK(work, jit_free_defer);
23805+ fp->work->image = fp->bpf_func;
23806 schedule_work(work);
23807 }
23808 }
23809diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23810index bff89df..377758a 100644
23811--- a/arch/x86/oprofile/backtrace.c
23812+++ b/arch/x86/oprofile/backtrace.c
23813@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23814 struct stack_frame_ia32 *fp;
23815 unsigned long bytes;
23816
23817- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23818+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23819 if (bytes != sizeof(bufhead))
23820 return NULL;
23821
23822- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23823+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23824
23825 oprofile_add_trace(bufhead[0].return_address);
23826
23827@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23828 struct stack_frame bufhead[2];
23829 unsigned long bytes;
23830
23831- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23832+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23833 if (bytes != sizeof(bufhead))
23834 return NULL;
23835
23836@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23837 {
23838 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23839
23840- if (!user_mode_vm(regs)) {
23841+ if (!user_mode(regs)) {
23842 unsigned long stack = kernel_stack_pointer(regs);
23843 if (depth)
23844 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23845diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23846index cb29191..036766d 100644
23847--- a/arch/x86/pci/mrst.c
23848+++ b/arch/x86/pci/mrst.c
23849@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23850 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23851 pci_mmcfg_late_init();
23852 pcibios_enable_irq = mrst_pci_irq_enable;
23853- pci_root_ops = pci_mrst_ops;
23854+ pax_open_kernel();
23855+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23856+ pax_close_kernel();
23857 /* Continue with standard init */
23858 return 1;
23859 }
23860diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23861index db0e9a5..8844dea 100644
23862--- a/arch/x86/pci/pcbios.c
23863+++ b/arch/x86/pci/pcbios.c
23864@@ -79,50 +79,93 @@ union bios32 {
23865 static struct {
23866 unsigned long address;
23867 unsigned short segment;
23868-} bios32_indirect = { 0, __KERNEL_CS };
23869+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23870
23871 /*
23872 * Returns the entry point for the given service, NULL on error
23873 */
23874
23875-static unsigned long bios32_service(unsigned long service)
23876+static unsigned long __devinit bios32_service(unsigned long service)
23877 {
23878 unsigned char return_code; /* %al */
23879 unsigned long address; /* %ebx */
23880 unsigned long length; /* %ecx */
23881 unsigned long entry; /* %edx */
23882 unsigned long flags;
23883+ struct desc_struct d, *gdt;
23884
23885 local_irq_save(flags);
23886- __asm__("lcall *(%%edi); cld"
23887+
23888+ gdt = get_cpu_gdt_table(smp_processor_id());
23889+
23890+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23891+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23892+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23893+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23894+
23895+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23896 : "=a" (return_code),
23897 "=b" (address),
23898 "=c" (length),
23899 "=d" (entry)
23900 : "0" (service),
23901 "1" (0),
23902- "D" (&bios32_indirect));
23903+ "D" (&bios32_indirect),
23904+ "r"(__PCIBIOS_DS)
23905+ : "memory");
23906+
23907+ pax_open_kernel();
23908+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23909+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23910+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23911+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23912+ pax_close_kernel();
23913+
23914 local_irq_restore(flags);
23915
23916 switch (return_code) {
23917- case 0:
23918- return address + entry;
23919- case 0x80: /* Not present */
23920- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23921- return 0;
23922- default: /* Shouldn't happen */
23923- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23924- service, return_code);
23925+ case 0: {
23926+ int cpu;
23927+ unsigned char flags;
23928+
23929+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23930+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23931+ printk(KERN_WARNING "bios32_service: not valid\n");
23932 return 0;
23933+ }
23934+ address = address + PAGE_OFFSET;
23935+ length += 16UL; /* some BIOSs underreport this... */
23936+ flags = 4;
23937+ if (length >= 64*1024*1024) {
23938+ length >>= PAGE_SHIFT;
23939+ flags |= 8;
23940+ }
23941+
23942+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23943+ gdt = get_cpu_gdt_table(cpu);
23944+ pack_descriptor(&d, address, length, 0x9b, flags);
23945+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23946+ pack_descriptor(&d, address, length, 0x93, flags);
23947+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23948+ }
23949+ return entry;
23950+ }
23951+ case 0x80: /* Not present */
23952+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23953+ return 0;
23954+ default: /* Shouldn't happen */
23955+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23956+ service, return_code);
23957+ return 0;
23958 }
23959 }
23960
23961 static struct {
23962 unsigned long address;
23963 unsigned short segment;
23964-} pci_indirect = { 0, __KERNEL_CS };
23965+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23966
23967-static int pci_bios_present;
23968+static int pci_bios_present __read_only;
23969
23970 static int __devinit check_pcibios(void)
23971 {
23972@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23973 unsigned long flags, pcibios_entry;
23974
23975 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23976- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23977+ pci_indirect.address = pcibios_entry;
23978
23979 local_irq_save(flags);
23980- __asm__(
23981- "lcall *(%%edi); cld\n\t"
23982+ __asm__("movw %w6, %%ds\n\t"
23983+ "lcall *%%ss:(%%edi); cld\n\t"
23984+ "push %%ss\n\t"
23985+ "pop %%ds\n\t"
23986 "jc 1f\n\t"
23987 "xor %%ah, %%ah\n"
23988 "1:"
23989@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23990 "=b" (ebx),
23991 "=c" (ecx)
23992 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23993- "D" (&pci_indirect)
23994+ "D" (&pci_indirect),
23995+ "r" (__PCIBIOS_DS)
23996 : "memory");
23997 local_irq_restore(flags);
23998
23999@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24000
24001 switch (len) {
24002 case 1:
24003- __asm__("lcall *(%%esi); cld\n\t"
24004+ __asm__("movw %w6, %%ds\n\t"
24005+ "lcall *%%ss:(%%esi); cld\n\t"
24006+ "push %%ss\n\t"
24007+ "pop %%ds\n\t"
24008 "jc 1f\n\t"
24009 "xor %%ah, %%ah\n"
24010 "1:"
24011@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24012 : "1" (PCIBIOS_READ_CONFIG_BYTE),
24013 "b" (bx),
24014 "D" ((long)reg),
24015- "S" (&pci_indirect));
24016+ "S" (&pci_indirect),
24017+ "r" (__PCIBIOS_DS));
24018 /*
24019 * Zero-extend the result beyond 8 bits, do not trust the
24020 * BIOS having done it:
24021@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24022 *value &= 0xff;
24023 break;
24024 case 2:
24025- __asm__("lcall *(%%esi); cld\n\t"
24026+ __asm__("movw %w6, %%ds\n\t"
24027+ "lcall *%%ss:(%%esi); cld\n\t"
24028+ "push %%ss\n\t"
24029+ "pop %%ds\n\t"
24030 "jc 1f\n\t"
24031 "xor %%ah, %%ah\n"
24032 "1:"
24033@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24034 : "1" (PCIBIOS_READ_CONFIG_WORD),
24035 "b" (bx),
24036 "D" ((long)reg),
24037- "S" (&pci_indirect));
24038+ "S" (&pci_indirect),
24039+ "r" (__PCIBIOS_DS));
24040 /*
24041 * Zero-extend the result beyond 16 bits, do not trust the
24042 * BIOS having done it:
24043@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24044 *value &= 0xffff;
24045 break;
24046 case 4:
24047- __asm__("lcall *(%%esi); cld\n\t"
24048+ __asm__("movw %w6, %%ds\n\t"
24049+ "lcall *%%ss:(%%esi); cld\n\t"
24050+ "push %%ss\n\t"
24051+ "pop %%ds\n\t"
24052 "jc 1f\n\t"
24053 "xor %%ah, %%ah\n"
24054 "1:"
24055@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
24056 : "1" (PCIBIOS_READ_CONFIG_DWORD),
24057 "b" (bx),
24058 "D" ((long)reg),
24059- "S" (&pci_indirect));
24060+ "S" (&pci_indirect),
24061+ "r" (__PCIBIOS_DS));
24062 break;
24063 }
24064
24065@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24066
24067 switch (len) {
24068 case 1:
24069- __asm__("lcall *(%%esi); cld\n\t"
24070+ __asm__("movw %w6, %%ds\n\t"
24071+ "lcall *%%ss:(%%esi); cld\n\t"
24072+ "push %%ss\n\t"
24073+ "pop %%ds\n\t"
24074 "jc 1f\n\t"
24075 "xor %%ah, %%ah\n"
24076 "1:"
24077@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24078 "c" (value),
24079 "b" (bx),
24080 "D" ((long)reg),
24081- "S" (&pci_indirect));
24082+ "S" (&pci_indirect),
24083+ "r" (__PCIBIOS_DS));
24084 break;
24085 case 2:
24086- __asm__("lcall *(%%esi); cld\n\t"
24087+ __asm__("movw %w6, %%ds\n\t"
24088+ "lcall *%%ss:(%%esi); cld\n\t"
24089+ "push %%ss\n\t"
24090+ "pop %%ds\n\t"
24091 "jc 1f\n\t"
24092 "xor %%ah, %%ah\n"
24093 "1:"
24094@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24095 "c" (value),
24096 "b" (bx),
24097 "D" ((long)reg),
24098- "S" (&pci_indirect));
24099+ "S" (&pci_indirect),
24100+ "r" (__PCIBIOS_DS));
24101 break;
24102 case 4:
24103- __asm__("lcall *(%%esi); cld\n\t"
24104+ __asm__("movw %w6, %%ds\n\t"
24105+ "lcall *%%ss:(%%esi); cld\n\t"
24106+ "push %%ss\n\t"
24107+ "pop %%ds\n\t"
24108 "jc 1f\n\t"
24109 "xor %%ah, %%ah\n"
24110 "1:"
24111@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24112 "c" (value),
24113 "b" (bx),
24114 "D" ((long)reg),
24115- "S" (&pci_indirect));
24116+ "S" (&pci_indirect),
24117+ "r" (__PCIBIOS_DS));
24118 break;
24119 }
24120
24121@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24122
24123 DBG("PCI: Fetching IRQ routing table... ");
24124 __asm__("push %%es\n\t"
24125+ "movw %w8, %%ds\n\t"
24126 "push %%ds\n\t"
24127 "pop %%es\n\t"
24128- "lcall *(%%esi); cld\n\t"
24129+ "lcall *%%ss:(%%esi); cld\n\t"
24130 "pop %%es\n\t"
24131+ "push %%ss\n\t"
24132+ "pop %%ds\n"
24133 "jc 1f\n\t"
24134 "xor %%ah, %%ah\n"
24135 "1:"
24136@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24137 "1" (0),
24138 "D" ((long) &opt),
24139 "S" (&pci_indirect),
24140- "m" (opt)
24141+ "m" (opt),
24142+ "r" (__PCIBIOS_DS)
24143 : "memory");
24144 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24145 if (ret & 0xff00)
24146@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24147 {
24148 int ret;
24149
24150- __asm__("lcall *(%%esi); cld\n\t"
24151+ __asm__("movw %w5, %%ds\n\t"
24152+ "lcall *%%ss:(%%esi); cld\n\t"
24153+ "push %%ss\n\t"
24154+ "pop %%ds\n"
24155 "jc 1f\n\t"
24156 "xor %%ah, %%ah\n"
24157 "1:"
24158@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24159 : "0" (PCIBIOS_SET_PCI_HW_INT),
24160 "b" ((dev->bus->number << 8) | dev->devfn),
24161 "c" ((irq << 8) | (pin + 10)),
24162- "S" (&pci_indirect));
24163+ "S" (&pci_indirect),
24164+ "r" (__PCIBIOS_DS));
24165 return !(ret & 0xff00);
24166 }
24167 EXPORT_SYMBOL(pcibios_set_irq_routing);
24168diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24169index 40e4469..1ab536e 100644
24170--- a/arch/x86/platform/efi/efi_32.c
24171+++ b/arch/x86/platform/efi/efi_32.c
24172@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
24173 {
24174 struct desc_ptr gdt_descr;
24175
24176+#ifdef CONFIG_PAX_KERNEXEC
24177+ struct desc_struct d;
24178+#endif
24179+
24180 local_irq_save(efi_rt_eflags);
24181
24182 load_cr3(initial_page_table);
24183 __flush_tlb_all();
24184
24185+#ifdef CONFIG_PAX_KERNEXEC
24186+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24187+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24188+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24189+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24190+#endif
24191+
24192 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24193 gdt_descr.size = GDT_SIZE - 1;
24194 load_gdt(&gdt_descr);
24195@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
24196 {
24197 struct desc_ptr gdt_descr;
24198
24199+#ifdef CONFIG_PAX_KERNEXEC
24200+ struct desc_struct d;
24201+
24202+ memset(&d, 0, sizeof d);
24203+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24204+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24205+#endif
24206+
24207 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24208 gdt_descr.size = GDT_SIZE - 1;
24209 load_gdt(&gdt_descr);
24210diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24211index fbe66e6..c5c0dd2 100644
24212--- a/arch/x86/platform/efi/efi_stub_32.S
24213+++ b/arch/x86/platform/efi/efi_stub_32.S
24214@@ -6,7 +6,9 @@
24215 */
24216
24217 #include <linux/linkage.h>
24218+#include <linux/init.h>
24219 #include <asm/page_types.h>
24220+#include <asm/segment.h>
24221
24222 /*
24223 * efi_call_phys(void *, ...) is a function with variable parameters.
24224@@ -20,7 +22,7 @@
24225 * service functions will comply with gcc calling convention, too.
24226 */
24227
24228-.text
24229+__INIT
24230 ENTRY(efi_call_phys)
24231 /*
24232 * 0. The function can only be called in Linux kernel. So CS has been
24233@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24234 * The mapping of lower virtual memory has been created in prelog and
24235 * epilog.
24236 */
24237- movl $1f, %edx
24238- subl $__PAGE_OFFSET, %edx
24239- jmp *%edx
24240+ movl $(__KERNEXEC_EFI_DS), %edx
24241+ mov %edx, %ds
24242+ mov %edx, %es
24243+ mov %edx, %ss
24244+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24245 1:
24246
24247 /*
24248@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24249 * parameter 2, ..., param n. To make things easy, we save the return
24250 * address of efi_call_phys in a global variable.
24251 */
24252- popl %edx
24253- movl %edx, saved_return_addr
24254- /* get the function pointer into ECX*/
24255- popl %ecx
24256- movl %ecx, efi_rt_function_ptr
24257- movl $2f, %edx
24258- subl $__PAGE_OFFSET, %edx
24259- pushl %edx
24260+ popl (saved_return_addr)
24261+ popl (efi_rt_function_ptr)
24262
24263 /*
24264 * 3. Clear PG bit in %CR0.
24265@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24266 /*
24267 * 5. Call the physical function.
24268 */
24269- jmp *%ecx
24270+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
24271
24272-2:
24273 /*
24274 * 6. After EFI runtime service returns, control will return to
24275 * following instruction. We'd better readjust stack pointer first.
24276@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24277 movl %cr0, %edx
24278 orl $0x80000000, %edx
24279 movl %edx, %cr0
24280- jmp 1f
24281-1:
24282+
24283 /*
24284 * 8. Now restore the virtual mode from flat mode by
24285 * adding EIP with PAGE_OFFSET.
24286 */
24287- movl $1f, %edx
24288- jmp *%edx
24289+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24290 1:
24291+ movl $(__KERNEL_DS), %edx
24292+ mov %edx, %ds
24293+ mov %edx, %es
24294+ mov %edx, %ss
24295
24296 /*
24297 * 9. Balance the stack. And because EAX contain the return value,
24298 * we'd better not clobber it.
24299 */
24300- leal efi_rt_function_ptr, %edx
24301- movl (%edx), %ecx
24302- pushl %ecx
24303+ pushl (efi_rt_function_ptr)
24304
24305 /*
24306- * 10. Push the saved return address onto the stack and return.
24307+ * 10. Return to the saved return address.
24308 */
24309- leal saved_return_addr, %edx
24310- movl (%edx), %ecx
24311- pushl %ecx
24312- ret
24313+ jmpl *(saved_return_addr)
24314 ENDPROC(efi_call_phys)
24315 .previous
24316
24317-.data
24318+__INITDATA
24319 saved_return_addr:
24320 .long 0
24321 efi_rt_function_ptr:
24322diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24323index 4c07cca..2c8427d 100644
24324--- a/arch/x86/platform/efi/efi_stub_64.S
24325+++ b/arch/x86/platform/efi/efi_stub_64.S
24326@@ -7,6 +7,7 @@
24327 */
24328
24329 #include <linux/linkage.h>
24330+#include <asm/alternative-asm.h>
24331
24332 #define SAVE_XMM \
24333 mov %rsp, %rax; \
24334@@ -40,6 +41,7 @@ ENTRY(efi_call0)
24335 call *%rdi
24336 addq $32, %rsp
24337 RESTORE_XMM
24338+ pax_force_retaddr 0, 1
24339 ret
24340 ENDPROC(efi_call0)
24341
24342@@ -50,6 +52,7 @@ ENTRY(efi_call1)
24343 call *%rdi
24344 addq $32, %rsp
24345 RESTORE_XMM
24346+ pax_force_retaddr 0, 1
24347 ret
24348 ENDPROC(efi_call1)
24349
24350@@ -60,6 +63,7 @@ ENTRY(efi_call2)
24351 call *%rdi
24352 addq $32, %rsp
24353 RESTORE_XMM
24354+ pax_force_retaddr 0, 1
24355 ret
24356 ENDPROC(efi_call2)
24357
24358@@ -71,6 +75,7 @@ ENTRY(efi_call3)
24359 call *%rdi
24360 addq $32, %rsp
24361 RESTORE_XMM
24362+ pax_force_retaddr 0, 1
24363 ret
24364 ENDPROC(efi_call3)
24365
24366@@ -83,6 +88,7 @@ ENTRY(efi_call4)
24367 call *%rdi
24368 addq $32, %rsp
24369 RESTORE_XMM
24370+ pax_force_retaddr 0, 1
24371 ret
24372 ENDPROC(efi_call4)
24373
24374@@ -96,6 +102,7 @@ ENTRY(efi_call5)
24375 call *%rdi
24376 addq $48, %rsp
24377 RESTORE_XMM
24378+ pax_force_retaddr 0, 1
24379 ret
24380 ENDPROC(efi_call5)
24381
24382@@ -112,5 +119,6 @@ ENTRY(efi_call6)
24383 call *%rdi
24384 addq $48, %rsp
24385 RESTORE_XMM
24386+ pax_force_retaddr 0, 1
24387 ret
24388 ENDPROC(efi_call6)
24389diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24390index ad4ec1c..686479e 100644
24391--- a/arch/x86/platform/mrst/mrst.c
24392+++ b/arch/x86/platform/mrst/mrst.c
24393@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
24394 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
24395 int sfi_mrtc_num;
24396
24397-static void mrst_power_off(void)
24398+static __noreturn void mrst_power_off(void)
24399 {
24400 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24401 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
24402+ BUG();
24403 }
24404
24405-static void mrst_reboot(void)
24406+static __noreturn void mrst_reboot(void)
24407 {
24408 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
24409 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
24410 else
24411 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
24412+ BUG();
24413 }
24414
24415 /* parse all the mtimer info to a static mtimer array */
24416diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24417index f10c0af..3ec1f95 100644
24418--- a/arch/x86/power/cpu.c
24419+++ b/arch/x86/power/cpu.c
24420@@ -131,7 +131,7 @@ static void do_fpu_end(void)
24421 static void fix_processor_context(void)
24422 {
24423 int cpu = smp_processor_id();
24424- struct tss_struct *t = &per_cpu(init_tss, cpu);
24425+ struct tss_struct *t = init_tss + cpu;
24426
24427 set_tss_desc(cpu, t); /*
24428 * This just modifies memory; should not be
24429@@ -141,7 +141,9 @@ static void fix_processor_context(void)
24430 */
24431
24432 #ifdef CONFIG_X86_64
24433+ pax_open_kernel();
24434 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24435+ pax_close_kernel();
24436
24437 syscall_init(); /* This sets MSR_*STAR and related */
24438 #endif
24439diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24440index 5d17950..2253fc9 100644
24441--- a/arch/x86/vdso/Makefile
24442+++ b/arch/x86/vdso/Makefile
24443@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24444 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24445 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24446
24447-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24448+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24449 GCOV_PROFILE := n
24450
24451 #
24452diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24453index 468d591..8e80a0a 100644
24454--- a/arch/x86/vdso/vdso32-setup.c
24455+++ b/arch/x86/vdso/vdso32-setup.c
24456@@ -25,6 +25,7 @@
24457 #include <asm/tlbflush.h>
24458 #include <asm/vdso.h>
24459 #include <asm/proto.h>
24460+#include <asm/mman.h>
24461
24462 enum {
24463 VDSO_DISABLED = 0,
24464@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24465 void enable_sep_cpu(void)
24466 {
24467 int cpu = get_cpu();
24468- struct tss_struct *tss = &per_cpu(init_tss, cpu);
24469+ struct tss_struct *tss = init_tss + cpu;
24470
24471 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24472 put_cpu();
24473@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24474 gate_vma.vm_start = FIXADDR_USER_START;
24475 gate_vma.vm_end = FIXADDR_USER_END;
24476 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24477- gate_vma.vm_page_prot = __P101;
24478+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24479 /*
24480 * Make sure the vDSO gets into every core dump.
24481 * Dumping its contents makes post-mortem fully interpretable later
24482@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24483 if (compat)
24484 addr = VDSO_HIGH_BASE;
24485 else {
24486- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24487+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24488 if (IS_ERR_VALUE(addr)) {
24489 ret = addr;
24490 goto up_fail;
24491 }
24492 }
24493
24494- current->mm->context.vdso = (void *)addr;
24495+ current->mm->context.vdso = addr;
24496
24497 if (compat_uses_vma || !compat) {
24498 /*
24499@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24500 }
24501
24502 current_thread_info()->sysenter_return =
24503- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24504+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24505
24506 up_fail:
24507 if (ret)
24508- current->mm->context.vdso = NULL;
24509+ current->mm->context.vdso = 0;
24510
24511 up_write(&mm->mmap_sem);
24512
24513@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24514
24515 const char *arch_vma_name(struct vm_area_struct *vma)
24516 {
24517- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24518+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24519 return "[vdso]";
24520+
24521+#ifdef CONFIG_PAX_SEGMEXEC
24522+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24523+ return "[vdso]";
24524+#endif
24525+
24526 return NULL;
24527 }
24528
24529@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24530 * Check to see if the corresponding task was created in compat vdso
24531 * mode.
24532 */
24533- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24534+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24535 return &gate_vma;
24536 return NULL;
24537 }
24538diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24539index 153407c..611cba9 100644
24540--- a/arch/x86/vdso/vma.c
24541+++ b/arch/x86/vdso/vma.c
24542@@ -16,8 +16,6 @@
24543 #include <asm/vdso.h>
24544 #include <asm/page.h>
24545
24546-unsigned int __read_mostly vdso_enabled = 1;
24547-
24548 extern char vdso_start[], vdso_end[];
24549 extern unsigned short vdso_sync_cpuid;
24550
24551@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24552 * unaligned here as a result of stack start randomization.
24553 */
24554 addr = PAGE_ALIGN(addr);
24555- addr = align_addr(addr, NULL, ALIGN_VDSO);
24556
24557 return addr;
24558 }
24559@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24560 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24561 {
24562 struct mm_struct *mm = current->mm;
24563- unsigned long addr;
24564+ unsigned long addr = 0;
24565 int ret;
24566
24567- if (!vdso_enabled)
24568- return 0;
24569-
24570 down_write(&mm->mmap_sem);
24571+
24572+#ifdef CONFIG_PAX_RANDMMAP
24573+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24574+#endif
24575+
24576 addr = vdso_addr(mm->start_stack, vdso_size);
24577+ addr = align_addr(addr, NULL, ALIGN_VDSO);
24578 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24579 if (IS_ERR_VALUE(addr)) {
24580 ret = addr;
24581 goto up_fail;
24582 }
24583
24584- current->mm->context.vdso = (void *)addr;
24585+ mm->context.vdso = addr;
24586
24587 ret = install_special_mapping(mm, addr, vdso_size,
24588 VM_READ|VM_EXEC|
24589 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24590 VM_ALWAYSDUMP,
24591 vdso_pages);
24592- if (ret) {
24593- current->mm->context.vdso = NULL;
24594- goto up_fail;
24595- }
24596+
24597+ if (ret)
24598+ mm->context.vdso = 0;
24599
24600 up_fail:
24601 up_write(&mm->mmap_sem);
24602 return ret;
24603 }
24604-
24605-static __init int vdso_setup(char *s)
24606-{
24607- vdso_enabled = simple_strtoul(s, NULL, 0);
24608- return 0;
24609-}
24610-__setup("vdso=", vdso_setup);
24611diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24612index 1f92865..c843b20 100644
24613--- a/arch/x86/xen/enlighten.c
24614+++ b/arch/x86/xen/enlighten.c
24615@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24616
24617 struct shared_info xen_dummy_shared_info;
24618
24619-void *xen_initial_gdt;
24620-
24621 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24622 __read_mostly int xen_have_vector_callback;
24623 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24624@@ -1029,7 +1027,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24625 #endif
24626 };
24627
24628-static void xen_reboot(int reason)
24629+static __noreturn void xen_reboot(int reason)
24630 {
24631 struct sched_shutdown r = { .reason = reason };
24632
24633@@ -1037,17 +1035,17 @@ static void xen_reboot(int reason)
24634 BUG();
24635 }
24636
24637-static void xen_restart(char *msg)
24638+static __noreturn void xen_restart(char *msg)
24639 {
24640 xen_reboot(SHUTDOWN_reboot);
24641 }
24642
24643-static void xen_emergency_restart(void)
24644+static __noreturn void xen_emergency_restart(void)
24645 {
24646 xen_reboot(SHUTDOWN_reboot);
24647 }
24648
24649-static void xen_machine_halt(void)
24650+static __noreturn void xen_machine_halt(void)
24651 {
24652 xen_reboot(SHUTDOWN_poweroff);
24653 }
24654@@ -1153,7 +1151,17 @@ asmlinkage void __init xen_start_kernel(void)
24655 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24656
24657 /* Work out if we support NX */
24658- x86_configure_nx();
24659+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24660+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24661+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24662+ unsigned l, h;
24663+
24664+ __supported_pte_mask |= _PAGE_NX;
24665+ rdmsr(MSR_EFER, l, h);
24666+ l |= EFER_NX;
24667+ wrmsr(MSR_EFER, l, h);
24668+ }
24669+#endif
24670
24671 xen_setup_features();
24672
24673@@ -1184,13 +1192,6 @@ asmlinkage void __init xen_start_kernel(void)
24674
24675 machine_ops = xen_machine_ops;
24676
24677- /*
24678- * The only reliable way to retain the initial address of the
24679- * percpu gdt_page is to remember it here, so we can go and
24680- * mark it RW later, when the initial percpu area is freed.
24681- */
24682- xen_initial_gdt = &per_cpu(gdt_page, 0);
24683-
24684 xen_smp_init();
24685
24686 #ifdef CONFIG_ACPI_NUMA
24687diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24688index 87f6673..e2555a6 100644
24689--- a/arch/x86/xen/mmu.c
24690+++ b/arch/x86/xen/mmu.c
24691@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24692 convert_pfn_mfn(init_level4_pgt);
24693 convert_pfn_mfn(level3_ident_pgt);
24694 convert_pfn_mfn(level3_kernel_pgt);
24695+ convert_pfn_mfn(level3_vmalloc_start_pgt);
24696+ convert_pfn_mfn(level3_vmalloc_end_pgt);
24697+ convert_pfn_mfn(level3_vmemmap_pgt);
24698
24699 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24700 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24701@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24702 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24703 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24704 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24705+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24706+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24707+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24708 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24709+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24710 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24711 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24712
24713@@ -1962,6 +1969,7 @@ static void __init xen_post_allocator_init(void)
24714 pv_mmu_ops.set_pud = xen_set_pud;
24715 #if PAGETABLE_LEVELS == 4
24716 pv_mmu_ops.set_pgd = xen_set_pgd;
24717+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24718 #endif
24719
24720 /* This will work as long as patching hasn't happened yet
24721@@ -2043,6 +2051,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24722 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24723 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24724 .set_pgd = xen_set_pgd_hyper,
24725+ .set_pgd_batched = xen_set_pgd_hyper,
24726
24727 .alloc_pud = xen_alloc_pmd_init,
24728 .release_pud = xen_release_pmd_init,
24729diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24730index 041d4fe..7666b7e 100644
24731--- a/arch/x86/xen/smp.c
24732+++ b/arch/x86/xen/smp.c
24733@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24734 {
24735 BUG_ON(smp_processor_id() != 0);
24736 native_smp_prepare_boot_cpu();
24737-
24738- /* We've switched to the "real" per-cpu gdt, so make sure the
24739- old memory can be recycled */
24740- make_lowmem_page_readwrite(xen_initial_gdt);
24741-
24742 xen_filter_cpu_maps();
24743 xen_setup_vcpu_info_placement();
24744 }
24745@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24746 gdt = get_cpu_gdt_table(cpu);
24747
24748 ctxt->flags = VGCF_IN_KERNEL;
24749- ctxt->user_regs.ds = __USER_DS;
24750- ctxt->user_regs.es = __USER_DS;
24751+ ctxt->user_regs.ds = __KERNEL_DS;
24752+ ctxt->user_regs.es = __KERNEL_DS;
24753 ctxt->user_regs.ss = __KERNEL_DS;
24754 #ifdef CONFIG_X86_32
24755 ctxt->user_regs.fs = __KERNEL_PERCPU;
24756- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24757+ savesegment(gs, ctxt->user_regs.gs);
24758 #else
24759 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24760 #endif
24761@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24762 int rc;
24763
24764 per_cpu(current_task, cpu) = idle;
24765+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
24766 #ifdef CONFIG_X86_32
24767 irq_ctx_init(cpu);
24768 #else
24769 clear_tsk_thread_flag(idle, TIF_FORK);
24770- per_cpu(kernel_stack, cpu) =
24771- (unsigned long)task_stack_page(idle) -
24772- KERNEL_STACK_OFFSET + THREAD_SIZE;
24773+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24774 #endif
24775 xen_setup_runstate_info(cpu);
24776 xen_setup_timer(cpu);
24777diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24778index b040b0e..8cc4fe0 100644
24779--- a/arch/x86/xen/xen-asm_32.S
24780+++ b/arch/x86/xen/xen-asm_32.S
24781@@ -83,14 +83,14 @@ ENTRY(xen_iret)
24782 ESP_OFFSET=4 # bytes pushed onto stack
24783
24784 /*
24785- * Store vcpu_info pointer for easy access. Do it this way to
24786- * avoid having to reload %fs
24787+ * Store vcpu_info pointer for easy access.
24788 */
24789 #ifdef CONFIG_SMP
24790- GET_THREAD_INFO(%eax)
24791- movl TI_cpu(%eax), %eax
24792- movl __per_cpu_offset(,%eax,4), %eax
24793- mov xen_vcpu(%eax), %eax
24794+ push %fs
24795+ mov $(__KERNEL_PERCPU), %eax
24796+ mov %eax, %fs
24797+ mov PER_CPU_VAR(xen_vcpu), %eax
24798+ pop %fs
24799 #else
24800 movl xen_vcpu, %eax
24801 #endif
24802diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24803index aaa7291..3f77960 100644
24804--- a/arch/x86/xen/xen-head.S
24805+++ b/arch/x86/xen/xen-head.S
24806@@ -19,6 +19,17 @@ ENTRY(startup_xen)
24807 #ifdef CONFIG_X86_32
24808 mov %esi,xen_start_info
24809 mov $init_thread_union+THREAD_SIZE,%esp
24810+#ifdef CONFIG_SMP
24811+ movl $cpu_gdt_table,%edi
24812+ movl $__per_cpu_load,%eax
24813+ movw %ax,__KERNEL_PERCPU + 2(%edi)
24814+ rorl $16,%eax
24815+ movb %al,__KERNEL_PERCPU + 4(%edi)
24816+ movb %ah,__KERNEL_PERCPU + 7(%edi)
24817+ movl $__per_cpu_end - 1,%eax
24818+ subl $__per_cpu_start,%eax
24819+ movw %ax,__KERNEL_PERCPU + 0(%edi)
24820+#endif
24821 #else
24822 mov %rsi,xen_start_info
24823 mov $init_thread_union+THREAD_SIZE,%rsp
24824diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24825index b095739..8c17bcd 100644
24826--- a/arch/x86/xen/xen-ops.h
24827+++ b/arch/x86/xen/xen-ops.h
24828@@ -10,8 +10,6 @@
24829 extern const char xen_hypervisor_callback[];
24830 extern const char xen_failsafe_callback[];
24831
24832-extern void *xen_initial_gdt;
24833-
24834 struct trap_info;
24835 void xen_copy_trap_info(struct trap_info *traps);
24836
24837diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24838index 58916af..9cb880b 100644
24839--- a/block/blk-iopoll.c
24840+++ b/block/blk-iopoll.c
24841@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24842 }
24843 EXPORT_SYMBOL(blk_iopoll_complete);
24844
24845-static void blk_iopoll_softirq(struct softirq_action *h)
24846+static void blk_iopoll_softirq(void)
24847 {
24848 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24849 int rearm = 0, budget = blk_iopoll_budget;
24850diff --git a/block/blk-map.c b/block/blk-map.c
24851index 623e1cd..ca1e109 100644
24852--- a/block/blk-map.c
24853+++ b/block/blk-map.c
24854@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24855 if (!len || !kbuf)
24856 return -EINVAL;
24857
24858- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24859+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24860 if (do_copy)
24861 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24862 else
24863diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24864index 1366a89..e17f54b 100644
24865--- a/block/blk-softirq.c
24866+++ b/block/blk-softirq.c
24867@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24868 * Softirq action handler - move entries to local list and loop over them
24869 * while passing them to the queue registered handler.
24870 */
24871-static void blk_done_softirq(struct softirq_action *h)
24872+static void blk_done_softirq(void)
24873 {
24874 struct list_head *cpu_list, local_list;
24875
24876diff --git a/block/bsg.c b/block/bsg.c
24877index 702f131..37808bf 100644
24878--- a/block/bsg.c
24879+++ b/block/bsg.c
24880@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24881 struct sg_io_v4 *hdr, struct bsg_device *bd,
24882 fmode_t has_write_perm)
24883 {
24884+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24885+ unsigned char *cmdptr;
24886+
24887 if (hdr->request_len > BLK_MAX_CDB) {
24888 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24889 if (!rq->cmd)
24890 return -ENOMEM;
24891- }
24892+ cmdptr = rq->cmd;
24893+ } else
24894+ cmdptr = tmpcmd;
24895
24896- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24897+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24898 hdr->request_len))
24899 return -EFAULT;
24900
24901+ if (cmdptr != rq->cmd)
24902+ memcpy(rq->cmd, cmdptr, hdr->request_len);
24903+
24904 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24905 if (blk_verify_command(rq->cmd, has_write_perm))
24906 return -EPERM;
24907diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24908index 7b72502..646105c 100644
24909--- a/block/compat_ioctl.c
24910+++ b/block/compat_ioctl.c
24911@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24912 err |= __get_user(f->spec1, &uf->spec1);
24913 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24914 err |= __get_user(name, &uf->name);
24915- f->name = compat_ptr(name);
24916+ f->name = (void __force_kernel *)compat_ptr(name);
24917 if (err) {
24918 err = -EFAULT;
24919 goto out;
24920diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24921index 688be8a..8a37d98 100644
24922--- a/block/scsi_ioctl.c
24923+++ b/block/scsi_ioctl.c
24924@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
24925 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24926 struct sg_io_hdr *hdr, fmode_t mode)
24927 {
24928- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24929+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24930+ unsigned char *cmdptr;
24931+
24932+ if (rq->cmd != rq->__cmd)
24933+ cmdptr = rq->cmd;
24934+ else
24935+ cmdptr = tmpcmd;
24936+
24937+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24938 return -EFAULT;
24939+
24940+ if (cmdptr != rq->cmd)
24941+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24942+
24943 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24944 return -EPERM;
24945
24946@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24947 int err;
24948 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24949 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24950+ unsigned char tmpcmd[sizeof(rq->__cmd)];
24951+ unsigned char *cmdptr;
24952
24953 if (!sic)
24954 return -EINVAL;
24955@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24956 */
24957 err = -EFAULT;
24958 rq->cmd_len = cmdlen;
24959- if (copy_from_user(rq->cmd, sic->data, cmdlen))
24960+
24961+ if (rq->cmd != rq->__cmd)
24962+ cmdptr = rq->cmd;
24963+ else
24964+ cmdptr = tmpcmd;
24965+
24966+ if (copy_from_user(cmdptr, sic->data, cmdlen))
24967 goto error;
24968
24969+ if (rq->cmd != cmdptr)
24970+ memcpy(rq->cmd, cmdptr, cmdlen);
24971+
24972 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24973 goto error;
24974
24975diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24976index 671d4d6..5f24030 100644
24977--- a/crypto/cryptd.c
24978+++ b/crypto/cryptd.c
24979@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24980
24981 struct cryptd_blkcipher_request_ctx {
24982 crypto_completion_t complete;
24983-};
24984+} __no_const;
24985
24986 struct cryptd_hash_ctx {
24987 struct crypto_shash *child;
24988@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24989
24990 struct cryptd_aead_request_ctx {
24991 crypto_completion_t complete;
24992-};
24993+} __no_const;
24994
24995 static void cryptd_queue_worker(struct work_struct *work);
24996
24997diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
24998index 9ed9f60..88f160b 100644
24999--- a/crypto/sha512_generic.c
25000+++ b/crypto/sha512_generic.c
25001@@ -21,8 +21,6 @@
25002 #include <linux/percpu.h>
25003 #include <asm/byteorder.h>
25004
25005-static DEFINE_PER_CPU(u64[80], msg_schedule);
25006-
25007 static inline u64 Ch(u64 x, u64 y, u64 z)
25008 {
25009 return z ^ (x & (y ^ z));
25010@@ -80,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
25011
25012 static inline void BLEND_OP(int I, u64 *W)
25013 {
25014- W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
25015+ W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
25016 }
25017
25018 static void
25019@@ -89,38 +87,48 @@ sha512_transform(u64 *state, const u8 *input)
25020 u64 a, b, c, d, e, f, g, h, t1, t2;
25021
25022 int i;
25023- u64 *W = get_cpu_var(msg_schedule);
25024+ u64 W[16];
25025
25026 /* load the input */
25027 for (i = 0; i < 16; i++)
25028 LOAD_OP(i, W, input);
25029
25030- for (i = 16; i < 80; i++) {
25031- BLEND_OP(i, W);
25032- }
25033-
25034 /* load the state into our registers */
25035 a=state[0]; b=state[1]; c=state[2]; d=state[3];
25036 e=state[4]; f=state[5]; g=state[6]; h=state[7];
25037
25038- /* now iterate */
25039- for (i=0; i<80; i+=8) {
25040- t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ];
25041- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
25042- t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
25043- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
25044- t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
25045- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
25046- t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
25047- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
25048- t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
25049- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
25050- t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
25051- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
25052- t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
25053- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
25054- t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
25055- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
25056+#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
25057+ t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
25058+ t2 = e0(a) + Maj(a, b, c); \
25059+ d += t1; \
25060+ h = t1 + t2
25061+
25062+#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
25063+ BLEND_OP(i, W); \
25064+ t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
25065+ t2 = e0(a) + Maj(a, b, c); \
25066+ d += t1; \
25067+ h = t1 + t2
25068+
25069+ for (i = 0; i < 16; i += 8) {
25070+ SHA512_0_15(i, a, b, c, d, e, f, g, h);
25071+ SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
25072+ SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
25073+ SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
25074+ SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
25075+ SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
25076+ SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
25077+ SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
25078+ }
25079+ for (i = 16; i < 80; i += 8) {
25080+ SHA512_16_79(i, a, b, c, d, e, f, g, h);
25081+ SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
25082+ SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
25083+ SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
25084+ SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
25085+ SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
25086+ SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
25087+ SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
25088 }
25089
25090 state[0] += a; state[1] += b; state[2] += c; state[3] += d;
25091@@ -128,8 +136,6 @@ sha512_transform(u64 *state, const u8 *input)
25092
25093 /* erase our data */
25094 a = b = c = d = e = f = g = h = t1 = t2 = 0;
25095- memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
25096- put_cpu_var(msg_schedule);
25097 }
25098
25099 static int
25100diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
25101index 5d41894..22021e4 100644
25102--- a/drivers/acpi/apei/cper.c
25103+++ b/drivers/acpi/apei/cper.c
25104@@ -38,12 +38,12 @@
25105 */
25106 u64 cper_next_record_id(void)
25107 {
25108- static atomic64_t seq;
25109+ static atomic64_unchecked_t seq;
25110
25111- if (!atomic64_read(&seq))
25112- atomic64_set(&seq, ((u64)get_seconds()) << 32);
25113+ if (!atomic64_read_unchecked(&seq))
25114+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
25115
25116- return atomic64_inc_return(&seq);
25117+ return atomic64_inc_return_unchecked(&seq);
25118 }
25119 EXPORT_SYMBOL_GPL(cper_next_record_id);
25120
25121diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
25122index 6c47ae9..8ab9132 100644
25123--- a/drivers/acpi/ec_sys.c
25124+++ b/drivers/acpi/ec_sys.c
25125@@ -12,6 +12,7 @@
25126 #include <linux/acpi.h>
25127 #include <linux/debugfs.h>
25128 #include <linux/module.h>
25129+#include <asm/uaccess.h>
25130 #include "internal.h"
25131
25132 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25133@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25134 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25135 */
25136 unsigned int size = EC_SPACE_SIZE;
25137- u8 *data = (u8 *) buf;
25138+ u8 data;
25139 loff_t init_off = *off;
25140 int err = 0;
25141
25142@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25143 size = count;
25144
25145 while (size) {
25146- err = ec_read(*off, &data[*off - init_off]);
25147+ err = ec_read(*off, &data);
25148 if (err)
25149 return err;
25150+ if (put_user(data, &buf[*off - init_off]))
25151+ return -EFAULT;
25152 *off += 1;
25153 size--;
25154 }
25155@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25156
25157 unsigned int size = count;
25158 loff_t init_off = *off;
25159- u8 *data = (u8 *) buf;
25160 int err = 0;
25161
25162 if (*off >= EC_SPACE_SIZE)
25163@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25164 }
25165
25166 while (size) {
25167- u8 byte_write = data[*off - init_off];
25168+ u8 byte_write;
25169+ if (get_user(byte_write, &buf[*off - init_off]))
25170+ return -EFAULT;
25171 err = ec_write(*off, byte_write);
25172 if (err)
25173 return err;
25174diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25175index 251c7b62..000462d 100644
25176--- a/drivers/acpi/proc.c
25177+++ b/drivers/acpi/proc.c
25178@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
25179 size_t count, loff_t * ppos)
25180 {
25181 struct list_head *node, *next;
25182- char strbuf[5];
25183- char str[5] = "";
25184- unsigned int len = count;
25185+ char strbuf[5] = {0};
25186
25187- if (len > 4)
25188- len = 4;
25189- if (len < 0)
25190+ if (count > 4)
25191+ count = 4;
25192+ if (copy_from_user(strbuf, buffer, count))
25193 return -EFAULT;
25194-
25195- if (copy_from_user(strbuf, buffer, len))
25196- return -EFAULT;
25197- strbuf[len] = '\0';
25198- sscanf(strbuf, "%s", str);
25199+ strbuf[count] = '\0';
25200
25201 mutex_lock(&acpi_device_lock);
25202 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25203@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
25204 if (!dev->wakeup.flags.valid)
25205 continue;
25206
25207- if (!strncmp(dev->pnp.bus_id, str, 4)) {
25208+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25209 if (device_can_wakeup(&dev->dev)) {
25210 bool enable = !device_may_wakeup(&dev->dev);
25211 device_set_wakeup_enable(&dev->dev, enable);
25212diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25213index 9d7bc9f..a6fc091 100644
25214--- a/drivers/acpi/processor_driver.c
25215+++ b/drivers/acpi/processor_driver.c
25216@@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25217 return 0;
25218 #endif
25219
25220- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25221+ BUG_ON(pr->id >= nr_cpu_ids);
25222
25223 /*
25224 * Buggy BIOS check
25225diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25226index c04ad68..0b99473 100644
25227--- a/drivers/ata/libata-core.c
25228+++ b/drivers/ata/libata-core.c
25229@@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25230 struct ata_port *ap;
25231 unsigned int tag;
25232
25233- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25234+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25235 ap = qc->ap;
25236
25237 qc->flags = 0;
25238@@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25239 struct ata_port *ap;
25240 struct ata_link *link;
25241
25242- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25243+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25244 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25245 ap = qc->ap;
25246 link = qc->dev->link;
25247@@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25248 return;
25249
25250 spin_lock(&lock);
25251+ pax_open_kernel();
25252
25253 for (cur = ops->inherits; cur; cur = cur->inherits) {
25254 void **inherit = (void **)cur;
25255@@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25256 if (IS_ERR(*pp))
25257 *pp = NULL;
25258
25259- ops->inherits = NULL;
25260+ *(struct ata_port_operations **)&ops->inherits = NULL;
25261
25262+ pax_close_kernel();
25263 spin_unlock(&lock);
25264 }
25265
25266diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25267index e8574bb..f9f6a72 100644
25268--- a/drivers/ata/pata_arasan_cf.c
25269+++ b/drivers/ata/pata_arasan_cf.c
25270@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25271 /* Handle platform specific quirks */
25272 if (pdata->quirk) {
25273 if (pdata->quirk & CF_BROKEN_PIO) {
25274- ap->ops->set_piomode = NULL;
25275+ pax_open_kernel();
25276+ *(void **)&ap->ops->set_piomode = NULL;
25277+ pax_close_kernel();
25278 ap->pio_mask = 0;
25279 }
25280 if (pdata->quirk & CF_BROKEN_MWDMA)
25281diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25282index f9b983a..887b9d8 100644
25283--- a/drivers/atm/adummy.c
25284+++ b/drivers/atm/adummy.c
25285@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25286 vcc->pop(vcc, skb);
25287 else
25288 dev_kfree_skb_any(skb);
25289- atomic_inc(&vcc->stats->tx);
25290+ atomic_inc_unchecked(&vcc->stats->tx);
25291
25292 return 0;
25293 }
25294diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25295index f8f41e0..1f987dd 100644
25296--- a/drivers/atm/ambassador.c
25297+++ b/drivers/atm/ambassador.c
25298@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25299 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25300
25301 // VC layer stats
25302- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25303+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25304
25305 // free the descriptor
25306 kfree (tx_descr);
25307@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25308 dump_skb ("<<<", vc, skb);
25309
25310 // VC layer stats
25311- atomic_inc(&atm_vcc->stats->rx);
25312+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25313 __net_timestamp(skb);
25314 // end of our responsibility
25315 atm_vcc->push (atm_vcc, skb);
25316@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25317 } else {
25318 PRINTK (KERN_INFO, "dropped over-size frame");
25319 // should we count this?
25320- atomic_inc(&atm_vcc->stats->rx_drop);
25321+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25322 }
25323
25324 } else {
25325@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25326 }
25327
25328 if (check_area (skb->data, skb->len)) {
25329- atomic_inc(&atm_vcc->stats->tx_err);
25330+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25331 return -ENOMEM; // ?
25332 }
25333
25334diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25335index b22d71c..d6e1049 100644
25336--- a/drivers/atm/atmtcp.c
25337+++ b/drivers/atm/atmtcp.c
25338@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25339 if (vcc->pop) vcc->pop(vcc,skb);
25340 else dev_kfree_skb(skb);
25341 if (dev_data) return 0;
25342- atomic_inc(&vcc->stats->tx_err);
25343+ atomic_inc_unchecked(&vcc->stats->tx_err);
25344 return -ENOLINK;
25345 }
25346 size = skb->len+sizeof(struct atmtcp_hdr);
25347@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25348 if (!new_skb) {
25349 if (vcc->pop) vcc->pop(vcc,skb);
25350 else dev_kfree_skb(skb);
25351- atomic_inc(&vcc->stats->tx_err);
25352+ atomic_inc_unchecked(&vcc->stats->tx_err);
25353 return -ENOBUFS;
25354 }
25355 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25356@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25357 if (vcc->pop) vcc->pop(vcc,skb);
25358 else dev_kfree_skb(skb);
25359 out_vcc->push(out_vcc,new_skb);
25360- atomic_inc(&vcc->stats->tx);
25361- atomic_inc(&out_vcc->stats->rx);
25362+ atomic_inc_unchecked(&vcc->stats->tx);
25363+ atomic_inc_unchecked(&out_vcc->stats->rx);
25364 return 0;
25365 }
25366
25367@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25368 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25369 read_unlock(&vcc_sklist_lock);
25370 if (!out_vcc) {
25371- atomic_inc(&vcc->stats->tx_err);
25372+ atomic_inc_unchecked(&vcc->stats->tx_err);
25373 goto done;
25374 }
25375 skb_pull(skb,sizeof(struct atmtcp_hdr));
25376@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25377 __net_timestamp(new_skb);
25378 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25379 out_vcc->push(out_vcc,new_skb);
25380- atomic_inc(&vcc->stats->tx);
25381- atomic_inc(&out_vcc->stats->rx);
25382+ atomic_inc_unchecked(&vcc->stats->tx);
25383+ atomic_inc_unchecked(&out_vcc->stats->rx);
25384 done:
25385 if (vcc->pop) vcc->pop(vcc,skb);
25386 else dev_kfree_skb(skb);
25387diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25388index 956e9ac..133516d 100644
25389--- a/drivers/atm/eni.c
25390+++ b/drivers/atm/eni.c
25391@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25392 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25393 vcc->dev->number);
25394 length = 0;
25395- atomic_inc(&vcc->stats->rx_err);
25396+ atomic_inc_unchecked(&vcc->stats->rx_err);
25397 }
25398 else {
25399 length = ATM_CELL_SIZE-1; /* no HEC */
25400@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25401 size);
25402 }
25403 eff = length = 0;
25404- atomic_inc(&vcc->stats->rx_err);
25405+ atomic_inc_unchecked(&vcc->stats->rx_err);
25406 }
25407 else {
25408 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25409@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25410 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25411 vcc->dev->number,vcc->vci,length,size << 2,descr);
25412 length = eff = 0;
25413- atomic_inc(&vcc->stats->rx_err);
25414+ atomic_inc_unchecked(&vcc->stats->rx_err);
25415 }
25416 }
25417 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25418@@ -771,7 +771,7 @@ rx_dequeued++;
25419 vcc->push(vcc,skb);
25420 pushed++;
25421 }
25422- atomic_inc(&vcc->stats->rx);
25423+ atomic_inc_unchecked(&vcc->stats->rx);
25424 }
25425 wake_up(&eni_dev->rx_wait);
25426 }
25427@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
25428 PCI_DMA_TODEVICE);
25429 if (vcc->pop) vcc->pop(vcc,skb);
25430 else dev_kfree_skb_irq(skb);
25431- atomic_inc(&vcc->stats->tx);
25432+ atomic_inc_unchecked(&vcc->stats->tx);
25433 wake_up(&eni_dev->tx_wait);
25434 dma_complete++;
25435 }
25436@@ -1569,7 +1569,7 @@ tx_complete++;
25437 /*--------------------------------- entries ---------------------------------*/
25438
25439
25440-static const char *media_name[] __devinitdata = {
25441+static const char *media_name[] __devinitconst = {
25442 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25443 "UTP", "05?", "06?", "07?", /* 4- 7 */
25444 "TAXI","09?", "10?", "11?", /* 8-11 */
25445diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25446index 5072f8a..fa52520d 100644
25447--- a/drivers/atm/firestream.c
25448+++ b/drivers/atm/firestream.c
25449@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25450 }
25451 }
25452
25453- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25454+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25455
25456 fs_dprintk (FS_DEBUG_TXMEM, "i");
25457 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25458@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25459 #endif
25460 skb_put (skb, qe->p1 & 0xffff);
25461 ATM_SKB(skb)->vcc = atm_vcc;
25462- atomic_inc(&atm_vcc->stats->rx);
25463+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25464 __net_timestamp(skb);
25465 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25466 atm_vcc->push (atm_vcc, skb);
25467@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25468 kfree (pe);
25469 }
25470 if (atm_vcc)
25471- atomic_inc(&atm_vcc->stats->rx_drop);
25472+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25473 break;
25474 case 0x1f: /* Reassembly abort: no buffers. */
25475 /* Silently increment error counter. */
25476 if (atm_vcc)
25477- atomic_inc(&atm_vcc->stats->rx_drop);
25478+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25479 break;
25480 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25481 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25482diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25483index 361f5ae..7fc552d 100644
25484--- a/drivers/atm/fore200e.c
25485+++ b/drivers/atm/fore200e.c
25486@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25487 #endif
25488 /* check error condition */
25489 if (*entry->status & STATUS_ERROR)
25490- atomic_inc(&vcc->stats->tx_err);
25491+ atomic_inc_unchecked(&vcc->stats->tx_err);
25492 else
25493- atomic_inc(&vcc->stats->tx);
25494+ atomic_inc_unchecked(&vcc->stats->tx);
25495 }
25496 }
25497
25498@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25499 if (skb == NULL) {
25500 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25501
25502- atomic_inc(&vcc->stats->rx_drop);
25503+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25504 return -ENOMEM;
25505 }
25506
25507@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25508
25509 dev_kfree_skb_any(skb);
25510
25511- atomic_inc(&vcc->stats->rx_drop);
25512+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25513 return -ENOMEM;
25514 }
25515
25516 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25517
25518 vcc->push(vcc, skb);
25519- atomic_inc(&vcc->stats->rx);
25520+ atomic_inc_unchecked(&vcc->stats->rx);
25521
25522 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25523
25524@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25525 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25526 fore200e->atm_dev->number,
25527 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25528- atomic_inc(&vcc->stats->rx_err);
25529+ atomic_inc_unchecked(&vcc->stats->rx_err);
25530 }
25531 }
25532
25533@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25534 goto retry_here;
25535 }
25536
25537- atomic_inc(&vcc->stats->tx_err);
25538+ atomic_inc_unchecked(&vcc->stats->tx_err);
25539
25540 fore200e->tx_sat++;
25541 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25542diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25543index 9a51df4..f3bb5f8 100644
25544--- a/drivers/atm/he.c
25545+++ b/drivers/atm/he.c
25546@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25547
25548 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25549 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25550- atomic_inc(&vcc->stats->rx_drop);
25551+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25552 goto return_host_buffers;
25553 }
25554
25555@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25556 RBRQ_LEN_ERR(he_dev->rbrq_head)
25557 ? "LEN_ERR" : "",
25558 vcc->vpi, vcc->vci);
25559- atomic_inc(&vcc->stats->rx_err);
25560+ atomic_inc_unchecked(&vcc->stats->rx_err);
25561 goto return_host_buffers;
25562 }
25563
25564@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25565 vcc->push(vcc, skb);
25566 spin_lock(&he_dev->global_lock);
25567
25568- atomic_inc(&vcc->stats->rx);
25569+ atomic_inc_unchecked(&vcc->stats->rx);
25570
25571 return_host_buffers:
25572 ++pdus_assembled;
25573@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25574 tpd->vcc->pop(tpd->vcc, tpd->skb);
25575 else
25576 dev_kfree_skb_any(tpd->skb);
25577- atomic_inc(&tpd->vcc->stats->tx_err);
25578+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25579 }
25580 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25581 return;
25582@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25583 vcc->pop(vcc, skb);
25584 else
25585 dev_kfree_skb_any(skb);
25586- atomic_inc(&vcc->stats->tx_err);
25587+ atomic_inc_unchecked(&vcc->stats->tx_err);
25588 return -EINVAL;
25589 }
25590
25591@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25592 vcc->pop(vcc, skb);
25593 else
25594 dev_kfree_skb_any(skb);
25595- atomic_inc(&vcc->stats->tx_err);
25596+ atomic_inc_unchecked(&vcc->stats->tx_err);
25597 return -EINVAL;
25598 }
25599 #endif
25600@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25601 vcc->pop(vcc, skb);
25602 else
25603 dev_kfree_skb_any(skb);
25604- atomic_inc(&vcc->stats->tx_err);
25605+ atomic_inc_unchecked(&vcc->stats->tx_err);
25606 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25607 return -ENOMEM;
25608 }
25609@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25610 vcc->pop(vcc, skb);
25611 else
25612 dev_kfree_skb_any(skb);
25613- atomic_inc(&vcc->stats->tx_err);
25614+ atomic_inc_unchecked(&vcc->stats->tx_err);
25615 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25616 return -ENOMEM;
25617 }
25618@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25619 __enqueue_tpd(he_dev, tpd, cid);
25620 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25621
25622- atomic_inc(&vcc->stats->tx);
25623+ atomic_inc_unchecked(&vcc->stats->tx);
25624
25625 return 0;
25626 }
25627diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25628index b812103..e391a49 100644
25629--- a/drivers/atm/horizon.c
25630+++ b/drivers/atm/horizon.c
25631@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25632 {
25633 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25634 // VC layer stats
25635- atomic_inc(&vcc->stats->rx);
25636+ atomic_inc_unchecked(&vcc->stats->rx);
25637 __net_timestamp(skb);
25638 // end of our responsibility
25639 vcc->push (vcc, skb);
25640@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25641 dev->tx_iovec = NULL;
25642
25643 // VC layer stats
25644- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25645+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25646
25647 // free the skb
25648 hrz_kfree_skb (skb);
25649diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25650index 1c05212..c28e200 100644
25651--- a/drivers/atm/idt77252.c
25652+++ b/drivers/atm/idt77252.c
25653@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25654 else
25655 dev_kfree_skb(skb);
25656
25657- atomic_inc(&vcc->stats->tx);
25658+ atomic_inc_unchecked(&vcc->stats->tx);
25659 }
25660
25661 atomic_dec(&scq->used);
25662@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25663 if ((sb = dev_alloc_skb(64)) == NULL) {
25664 printk("%s: Can't allocate buffers for aal0.\n",
25665 card->name);
25666- atomic_add(i, &vcc->stats->rx_drop);
25667+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25668 break;
25669 }
25670 if (!atm_charge(vcc, sb->truesize)) {
25671 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25672 card->name);
25673- atomic_add(i - 1, &vcc->stats->rx_drop);
25674+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25675 dev_kfree_skb(sb);
25676 break;
25677 }
25678@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25679 ATM_SKB(sb)->vcc = vcc;
25680 __net_timestamp(sb);
25681 vcc->push(vcc, sb);
25682- atomic_inc(&vcc->stats->rx);
25683+ atomic_inc_unchecked(&vcc->stats->rx);
25684
25685 cell += ATM_CELL_PAYLOAD;
25686 }
25687@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25688 "(CDC: %08x)\n",
25689 card->name, len, rpp->len, readl(SAR_REG_CDC));
25690 recycle_rx_pool_skb(card, rpp);
25691- atomic_inc(&vcc->stats->rx_err);
25692+ atomic_inc_unchecked(&vcc->stats->rx_err);
25693 return;
25694 }
25695 if (stat & SAR_RSQE_CRC) {
25696 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25697 recycle_rx_pool_skb(card, rpp);
25698- atomic_inc(&vcc->stats->rx_err);
25699+ atomic_inc_unchecked(&vcc->stats->rx_err);
25700 return;
25701 }
25702 if (skb_queue_len(&rpp->queue) > 1) {
25703@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25704 RXPRINTK("%s: Can't alloc RX skb.\n",
25705 card->name);
25706 recycle_rx_pool_skb(card, rpp);
25707- atomic_inc(&vcc->stats->rx_err);
25708+ atomic_inc_unchecked(&vcc->stats->rx_err);
25709 return;
25710 }
25711 if (!atm_charge(vcc, skb->truesize)) {
25712@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25713 __net_timestamp(skb);
25714
25715 vcc->push(vcc, skb);
25716- atomic_inc(&vcc->stats->rx);
25717+ atomic_inc_unchecked(&vcc->stats->rx);
25718
25719 return;
25720 }
25721@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25722 __net_timestamp(skb);
25723
25724 vcc->push(vcc, skb);
25725- atomic_inc(&vcc->stats->rx);
25726+ atomic_inc_unchecked(&vcc->stats->rx);
25727
25728 if (skb->truesize > SAR_FB_SIZE_3)
25729 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25730@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25731 if (vcc->qos.aal != ATM_AAL0) {
25732 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25733 card->name, vpi, vci);
25734- atomic_inc(&vcc->stats->rx_drop);
25735+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25736 goto drop;
25737 }
25738
25739 if ((sb = dev_alloc_skb(64)) == NULL) {
25740 printk("%s: Can't allocate buffers for AAL0.\n",
25741 card->name);
25742- atomic_inc(&vcc->stats->rx_err);
25743+ atomic_inc_unchecked(&vcc->stats->rx_err);
25744 goto drop;
25745 }
25746
25747@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25748 ATM_SKB(sb)->vcc = vcc;
25749 __net_timestamp(sb);
25750 vcc->push(vcc, sb);
25751- atomic_inc(&vcc->stats->rx);
25752+ atomic_inc_unchecked(&vcc->stats->rx);
25753
25754 drop:
25755 skb_pull(queue, 64);
25756@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25757
25758 if (vc == NULL) {
25759 printk("%s: NULL connection in send().\n", card->name);
25760- atomic_inc(&vcc->stats->tx_err);
25761+ atomic_inc_unchecked(&vcc->stats->tx_err);
25762 dev_kfree_skb(skb);
25763 return -EINVAL;
25764 }
25765 if (!test_bit(VCF_TX, &vc->flags)) {
25766 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25767- atomic_inc(&vcc->stats->tx_err);
25768+ atomic_inc_unchecked(&vcc->stats->tx_err);
25769 dev_kfree_skb(skb);
25770 return -EINVAL;
25771 }
25772@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25773 break;
25774 default:
25775 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25776- atomic_inc(&vcc->stats->tx_err);
25777+ atomic_inc_unchecked(&vcc->stats->tx_err);
25778 dev_kfree_skb(skb);
25779 return -EINVAL;
25780 }
25781
25782 if (skb_shinfo(skb)->nr_frags != 0) {
25783 printk("%s: No scatter-gather yet.\n", card->name);
25784- atomic_inc(&vcc->stats->tx_err);
25785+ atomic_inc_unchecked(&vcc->stats->tx_err);
25786 dev_kfree_skb(skb);
25787 return -EINVAL;
25788 }
25789@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25790
25791 err = queue_skb(card, vc, skb, oam);
25792 if (err) {
25793- atomic_inc(&vcc->stats->tx_err);
25794+ atomic_inc_unchecked(&vcc->stats->tx_err);
25795 dev_kfree_skb(skb);
25796 return err;
25797 }
25798@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25799 skb = dev_alloc_skb(64);
25800 if (!skb) {
25801 printk("%s: Out of memory in send_oam().\n", card->name);
25802- atomic_inc(&vcc->stats->tx_err);
25803+ atomic_inc_unchecked(&vcc->stats->tx_err);
25804 return -ENOMEM;
25805 }
25806 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25807diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25808index 3d0c2b0..45441fa 100644
25809--- a/drivers/atm/iphase.c
25810+++ b/drivers/atm/iphase.c
25811@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25812 status = (u_short) (buf_desc_ptr->desc_mode);
25813 if (status & (RX_CER | RX_PTE | RX_OFL))
25814 {
25815- atomic_inc(&vcc->stats->rx_err);
25816+ atomic_inc_unchecked(&vcc->stats->rx_err);
25817 IF_ERR(printk("IA: bad packet, dropping it");)
25818 if (status & RX_CER) {
25819 IF_ERR(printk(" cause: packet CRC error\n");)
25820@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
25821 len = dma_addr - buf_addr;
25822 if (len > iadev->rx_buf_sz) {
25823 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25824- atomic_inc(&vcc->stats->rx_err);
25825+ atomic_inc_unchecked(&vcc->stats->rx_err);
25826 goto out_free_desc;
25827 }
25828
25829@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25830 ia_vcc = INPH_IA_VCC(vcc);
25831 if (ia_vcc == NULL)
25832 {
25833- atomic_inc(&vcc->stats->rx_err);
25834+ atomic_inc_unchecked(&vcc->stats->rx_err);
25835 dev_kfree_skb_any(skb);
25836 atm_return(vcc, atm_guess_pdu2truesize(len));
25837 goto INCR_DLE;
25838@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25839 if ((length > iadev->rx_buf_sz) || (length >
25840 (skb->len - sizeof(struct cpcs_trailer))))
25841 {
25842- atomic_inc(&vcc->stats->rx_err);
25843+ atomic_inc_unchecked(&vcc->stats->rx_err);
25844 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25845 length, skb->len);)
25846 dev_kfree_skb_any(skb);
25847@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25848
25849 IF_RX(printk("rx_dle_intr: skb push");)
25850 vcc->push(vcc,skb);
25851- atomic_inc(&vcc->stats->rx);
25852+ atomic_inc_unchecked(&vcc->stats->rx);
25853 iadev->rx_pkt_cnt++;
25854 }
25855 INCR_DLE:
25856@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25857 {
25858 struct k_sonet_stats *stats;
25859 stats = &PRIV(_ia_dev[board])->sonet_stats;
25860- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25861- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25862- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25863- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25864- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25865- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25866- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25867- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25868- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25869+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25870+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25871+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25872+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25873+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25874+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25875+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25876+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25877+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25878 }
25879 ia_cmds.status = 0;
25880 break;
25881@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25882 if ((desc == 0) || (desc > iadev->num_tx_desc))
25883 {
25884 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25885- atomic_inc(&vcc->stats->tx);
25886+ atomic_inc_unchecked(&vcc->stats->tx);
25887 if (vcc->pop)
25888 vcc->pop(vcc, skb);
25889 else
25890@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25891 ATM_DESC(skb) = vcc->vci;
25892 skb_queue_tail(&iadev->tx_dma_q, skb);
25893
25894- atomic_inc(&vcc->stats->tx);
25895+ atomic_inc_unchecked(&vcc->stats->tx);
25896 iadev->tx_pkt_cnt++;
25897 /* Increment transaction counter */
25898 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25899
25900 #if 0
25901 /* add flow control logic */
25902- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25903+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25904 if (iavcc->vc_desc_cnt > 10) {
25905 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25906 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25907diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25908index f556969..0da15eb 100644
25909--- a/drivers/atm/lanai.c
25910+++ b/drivers/atm/lanai.c
25911@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25912 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25913 lanai_endtx(lanai, lvcc);
25914 lanai_free_skb(lvcc->tx.atmvcc, skb);
25915- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25916+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25917 }
25918
25919 /* Try to fill the buffer - don't call unless there is backlog */
25920@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25921 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25922 __net_timestamp(skb);
25923 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25924- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25925+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25926 out:
25927 lvcc->rx.buf.ptr = end;
25928 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25929@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25930 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25931 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25932 lanai->stats.service_rxnotaal5++;
25933- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25934+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25935 return 0;
25936 }
25937 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25938@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25939 int bytes;
25940 read_unlock(&vcc_sklist_lock);
25941 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25942- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25943+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25944 lvcc->stats.x.aal5.service_trash++;
25945 bytes = (SERVICE_GET_END(s) * 16) -
25946 (((unsigned long) lvcc->rx.buf.ptr) -
25947@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25948 }
25949 if (s & SERVICE_STREAM) {
25950 read_unlock(&vcc_sklist_lock);
25951- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25952+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25953 lvcc->stats.x.aal5.service_stream++;
25954 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25955 "PDU on VCI %d!\n", lanai->number, vci);
25956@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25957 return 0;
25958 }
25959 DPRINTK("got rx crc error on vci %d\n", vci);
25960- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25961+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25962 lvcc->stats.x.aal5.service_rxcrc++;
25963 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25964 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25965diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25966index 1c70c45..300718d 100644
25967--- a/drivers/atm/nicstar.c
25968+++ b/drivers/atm/nicstar.c
25969@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25970 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25971 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25972 card->index);
25973- atomic_inc(&vcc->stats->tx_err);
25974+ atomic_inc_unchecked(&vcc->stats->tx_err);
25975 dev_kfree_skb_any(skb);
25976 return -EINVAL;
25977 }
25978@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25979 if (!vc->tx) {
25980 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25981 card->index);
25982- atomic_inc(&vcc->stats->tx_err);
25983+ atomic_inc_unchecked(&vcc->stats->tx_err);
25984 dev_kfree_skb_any(skb);
25985 return -EINVAL;
25986 }
25987@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25988 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25989 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25990 card->index);
25991- atomic_inc(&vcc->stats->tx_err);
25992+ atomic_inc_unchecked(&vcc->stats->tx_err);
25993 dev_kfree_skb_any(skb);
25994 return -EINVAL;
25995 }
25996
25997 if (skb_shinfo(skb)->nr_frags != 0) {
25998 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25999- atomic_inc(&vcc->stats->tx_err);
26000+ atomic_inc_unchecked(&vcc->stats->tx_err);
26001 dev_kfree_skb_any(skb);
26002 return -EINVAL;
26003 }
26004@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
26005 }
26006
26007 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
26008- atomic_inc(&vcc->stats->tx_err);
26009+ atomic_inc_unchecked(&vcc->stats->tx_err);
26010 dev_kfree_skb_any(skb);
26011 return -EIO;
26012 }
26013- atomic_inc(&vcc->stats->tx);
26014+ atomic_inc_unchecked(&vcc->stats->tx);
26015
26016 return 0;
26017 }
26018@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26019 printk
26020 ("nicstar%d: Can't allocate buffers for aal0.\n",
26021 card->index);
26022- atomic_add(i, &vcc->stats->rx_drop);
26023+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26024 break;
26025 }
26026 if (!atm_charge(vcc, sb->truesize)) {
26027 RXPRINTK
26028 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
26029 card->index);
26030- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26031+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
26032 dev_kfree_skb_any(sb);
26033 break;
26034 }
26035@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26036 ATM_SKB(sb)->vcc = vcc;
26037 __net_timestamp(sb);
26038 vcc->push(vcc, sb);
26039- atomic_inc(&vcc->stats->rx);
26040+ atomic_inc_unchecked(&vcc->stats->rx);
26041 cell += ATM_CELL_PAYLOAD;
26042 }
26043
26044@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26045 if (iovb == NULL) {
26046 printk("nicstar%d: Out of iovec buffers.\n",
26047 card->index);
26048- atomic_inc(&vcc->stats->rx_drop);
26049+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26050 recycle_rx_buf(card, skb);
26051 return;
26052 }
26053@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26054 small or large buffer itself. */
26055 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
26056 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26057- atomic_inc(&vcc->stats->rx_err);
26058+ atomic_inc_unchecked(&vcc->stats->rx_err);
26059 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26060 NS_MAX_IOVECS);
26061 NS_PRV_IOVCNT(iovb) = 0;
26062@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26063 ("nicstar%d: Expected a small buffer, and this is not one.\n",
26064 card->index);
26065 which_list(card, skb);
26066- atomic_inc(&vcc->stats->rx_err);
26067+ atomic_inc_unchecked(&vcc->stats->rx_err);
26068 recycle_rx_buf(card, skb);
26069 vc->rx_iov = NULL;
26070 recycle_iov_buf(card, iovb);
26071@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26072 ("nicstar%d: Expected a large buffer, and this is not one.\n",
26073 card->index);
26074 which_list(card, skb);
26075- atomic_inc(&vcc->stats->rx_err);
26076+ atomic_inc_unchecked(&vcc->stats->rx_err);
26077 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26078 NS_PRV_IOVCNT(iovb));
26079 vc->rx_iov = NULL;
26080@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26081 printk(" - PDU size mismatch.\n");
26082 else
26083 printk(".\n");
26084- atomic_inc(&vcc->stats->rx_err);
26085+ atomic_inc_unchecked(&vcc->stats->rx_err);
26086 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
26087 NS_PRV_IOVCNT(iovb));
26088 vc->rx_iov = NULL;
26089@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26090 /* skb points to a small buffer */
26091 if (!atm_charge(vcc, skb->truesize)) {
26092 push_rxbufs(card, skb);
26093- atomic_inc(&vcc->stats->rx_drop);
26094+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26095 } else {
26096 skb_put(skb, len);
26097 dequeue_sm_buf(card, skb);
26098@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26099 ATM_SKB(skb)->vcc = vcc;
26100 __net_timestamp(skb);
26101 vcc->push(vcc, skb);
26102- atomic_inc(&vcc->stats->rx);
26103+ atomic_inc_unchecked(&vcc->stats->rx);
26104 }
26105 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
26106 struct sk_buff *sb;
26107@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26108 if (len <= NS_SMBUFSIZE) {
26109 if (!atm_charge(vcc, sb->truesize)) {
26110 push_rxbufs(card, sb);
26111- atomic_inc(&vcc->stats->rx_drop);
26112+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26113 } else {
26114 skb_put(sb, len);
26115 dequeue_sm_buf(card, sb);
26116@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26117 ATM_SKB(sb)->vcc = vcc;
26118 __net_timestamp(sb);
26119 vcc->push(vcc, sb);
26120- atomic_inc(&vcc->stats->rx);
26121+ atomic_inc_unchecked(&vcc->stats->rx);
26122 }
26123
26124 push_rxbufs(card, skb);
26125@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26126
26127 if (!atm_charge(vcc, skb->truesize)) {
26128 push_rxbufs(card, skb);
26129- atomic_inc(&vcc->stats->rx_drop);
26130+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26131 } else {
26132 dequeue_lg_buf(card, skb);
26133 #ifdef NS_USE_DESTRUCTORS
26134@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26135 ATM_SKB(skb)->vcc = vcc;
26136 __net_timestamp(skb);
26137 vcc->push(vcc, skb);
26138- atomic_inc(&vcc->stats->rx);
26139+ atomic_inc_unchecked(&vcc->stats->rx);
26140 }
26141
26142 push_rxbufs(card, sb);
26143@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26144 printk
26145 ("nicstar%d: Out of huge buffers.\n",
26146 card->index);
26147- atomic_inc(&vcc->stats->rx_drop);
26148+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26149 recycle_iovec_rx_bufs(card,
26150 (struct iovec *)
26151 iovb->data,
26152@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26153 card->hbpool.count++;
26154 } else
26155 dev_kfree_skb_any(hb);
26156- atomic_inc(&vcc->stats->rx_drop);
26157+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26158 } else {
26159 /* Copy the small buffer to the huge buffer */
26160 sb = (struct sk_buff *)iov->iov_base;
26161@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26162 #endif /* NS_USE_DESTRUCTORS */
26163 __net_timestamp(hb);
26164 vcc->push(vcc, hb);
26165- atomic_inc(&vcc->stats->rx);
26166+ atomic_inc_unchecked(&vcc->stats->rx);
26167 }
26168 }
26169
26170diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26171index 5d1d076..12fbca4 100644
26172--- a/drivers/atm/solos-pci.c
26173+++ b/drivers/atm/solos-pci.c
26174@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26175 }
26176 atm_charge(vcc, skb->truesize);
26177 vcc->push(vcc, skb);
26178- atomic_inc(&vcc->stats->rx);
26179+ atomic_inc_unchecked(&vcc->stats->rx);
26180 break;
26181
26182 case PKT_STATUS:
26183@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26184 vcc = SKB_CB(oldskb)->vcc;
26185
26186 if (vcc) {
26187- atomic_inc(&vcc->stats->tx);
26188+ atomic_inc_unchecked(&vcc->stats->tx);
26189 solos_pop(vcc, oldskb);
26190 } else
26191 dev_kfree_skb_irq(oldskb);
26192diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26193index 90f1ccc..04c4a1e 100644
26194--- a/drivers/atm/suni.c
26195+++ b/drivers/atm/suni.c
26196@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26197
26198
26199 #define ADD_LIMITED(s,v) \
26200- atomic_add((v),&stats->s); \
26201- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26202+ atomic_add_unchecked((v),&stats->s); \
26203+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26204
26205
26206 static void suni_hz(unsigned long from_timer)
26207diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26208index 5120a96..e2572bd 100644
26209--- a/drivers/atm/uPD98402.c
26210+++ b/drivers/atm/uPD98402.c
26211@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26212 struct sonet_stats tmp;
26213 int error = 0;
26214
26215- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26216+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26217 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26218 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26219 if (zero && !error) {
26220@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26221
26222
26223 #define ADD_LIMITED(s,v) \
26224- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26225- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26226- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26227+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26228+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26229+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26230
26231
26232 static void stat_event(struct atm_dev *dev)
26233@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26234 if (reason & uPD98402_INT_PFM) stat_event(dev);
26235 if (reason & uPD98402_INT_PCO) {
26236 (void) GET(PCOCR); /* clear interrupt cause */
26237- atomic_add(GET(HECCT),
26238+ atomic_add_unchecked(GET(HECCT),
26239 &PRIV(dev)->sonet_stats.uncorr_hcs);
26240 }
26241 if ((reason & uPD98402_INT_RFO) &&
26242@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26243 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26244 uPD98402_INT_LOS),PIMR); /* enable them */
26245 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26246- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26247- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26248- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26249+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26250+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26251+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26252 return 0;
26253 }
26254
26255diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26256index d889f56..17eb71e 100644
26257--- a/drivers/atm/zatm.c
26258+++ b/drivers/atm/zatm.c
26259@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26260 }
26261 if (!size) {
26262 dev_kfree_skb_irq(skb);
26263- if (vcc) atomic_inc(&vcc->stats->rx_err);
26264+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26265 continue;
26266 }
26267 if (!atm_charge(vcc,skb->truesize)) {
26268@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26269 skb->len = size;
26270 ATM_SKB(skb)->vcc = vcc;
26271 vcc->push(vcc,skb);
26272- atomic_inc(&vcc->stats->rx);
26273+ atomic_inc_unchecked(&vcc->stats->rx);
26274 }
26275 zout(pos & 0xffff,MTA(mbx));
26276 #if 0 /* probably a stupid idea */
26277@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26278 skb_queue_head(&zatm_vcc->backlog,skb);
26279 break;
26280 }
26281- atomic_inc(&vcc->stats->tx);
26282+ atomic_inc_unchecked(&vcc->stats->tx);
26283 wake_up(&zatm_vcc->tx_wait);
26284 }
26285
26286diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26287index a4760e0..51283cf 100644
26288--- a/drivers/base/devtmpfs.c
26289+++ b/drivers/base/devtmpfs.c
26290@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26291 if (!thread)
26292 return 0;
26293
26294- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26295+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26296 if (err)
26297 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26298 else
26299diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26300index caf995f..6f76697 100644
26301--- a/drivers/base/power/wakeup.c
26302+++ b/drivers/base/power/wakeup.c
26303@@ -30,14 +30,14 @@ bool events_check_enabled;
26304 * They need to be modified together atomically, so it's better to use one
26305 * atomic variable to hold them both.
26306 */
26307-static atomic_t combined_event_count = ATOMIC_INIT(0);
26308+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26309
26310 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26311 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26312
26313 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26314 {
26315- unsigned int comb = atomic_read(&combined_event_count);
26316+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
26317
26318 *cnt = (comb >> IN_PROGRESS_BITS);
26319 *inpr = comb & MAX_IN_PROGRESS;
26320@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26321 ws->last_time = ktime_get();
26322
26323 /* Increment the counter of events in progress. */
26324- atomic_inc(&combined_event_count);
26325+ atomic_inc_unchecked(&combined_event_count);
26326 }
26327
26328 /**
26329@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26330 * Increment the counter of registered wakeup events and decrement the
26331 * couter of wakeup events in progress simultaneously.
26332 */
26333- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26334+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26335 }
26336
26337 /**
26338diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26339index b0f553b..77b928b 100644
26340--- a/drivers/block/cciss.c
26341+++ b/drivers/block/cciss.c
26342@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26343 int err;
26344 u32 cp;
26345
26346+ memset(&arg64, 0, sizeof(arg64));
26347+
26348 err = 0;
26349 err |=
26350 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26351@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
26352 while (!list_empty(&h->reqQ)) {
26353 c = list_entry(h->reqQ.next, CommandList_struct, list);
26354 /* can't do anything if fifo is full */
26355- if ((h->access.fifo_full(h))) {
26356+ if ((h->access->fifo_full(h))) {
26357 dev_warn(&h->pdev->dev, "fifo full\n");
26358 break;
26359 }
26360@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
26361 h->Qdepth--;
26362
26363 /* Tell the controller execute command */
26364- h->access.submit_command(h, c);
26365+ h->access->submit_command(h, c);
26366
26367 /* Put job onto the completed Q */
26368 addQ(&h->cmpQ, c);
26369@@ -3443,17 +3445,17 @@ startio:
26370
26371 static inline unsigned long get_next_completion(ctlr_info_t *h)
26372 {
26373- return h->access.command_completed(h);
26374+ return h->access->command_completed(h);
26375 }
26376
26377 static inline int interrupt_pending(ctlr_info_t *h)
26378 {
26379- return h->access.intr_pending(h);
26380+ return h->access->intr_pending(h);
26381 }
26382
26383 static inline long interrupt_not_for_us(ctlr_info_t *h)
26384 {
26385- return ((h->access.intr_pending(h) == 0) ||
26386+ return ((h->access->intr_pending(h) == 0) ||
26387 (h->interrupts_enabled == 0));
26388 }
26389
26390@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
26391 u32 a;
26392
26393 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26394- return h->access.command_completed(h);
26395+ return h->access->command_completed(h);
26396
26397 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26398 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26399@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26400 trans_support & CFGTBL_Trans_use_short_tags);
26401
26402 /* Change the access methods to the performant access methods */
26403- h->access = SA5_performant_access;
26404+ h->access = &SA5_performant_access;
26405 h->transMethod = CFGTBL_Trans_Performant;
26406
26407 return;
26408@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26409 if (prod_index < 0)
26410 return -ENODEV;
26411 h->product_name = products[prod_index].product_name;
26412- h->access = *(products[prod_index].access);
26413+ h->access = products[prod_index].access;
26414
26415 if (cciss_board_disabled(h)) {
26416 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26417@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
26418 }
26419
26420 /* make sure the board interrupts are off */
26421- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26422+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26423 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26424 if (rc)
26425 goto clean2;
26426@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
26427 * fake ones to scoop up any residual completions.
26428 */
26429 spin_lock_irqsave(&h->lock, flags);
26430- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26431+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26432 spin_unlock_irqrestore(&h->lock, flags);
26433 free_irq(h->intr[h->intr_mode], h);
26434 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26435@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
26436 dev_info(&h->pdev->dev, "Board READY.\n");
26437 dev_info(&h->pdev->dev,
26438 "Waiting for stale completions to drain.\n");
26439- h->access.set_intr_mask(h, CCISS_INTR_ON);
26440+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26441 msleep(10000);
26442- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26443+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26444
26445 rc = controller_reset_failed(h->cfgtable);
26446 if (rc)
26447@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
26448 cciss_scsi_setup(h);
26449
26450 /* Turn the interrupts on so we can service requests */
26451- h->access.set_intr_mask(h, CCISS_INTR_ON);
26452+ h->access->set_intr_mask(h, CCISS_INTR_ON);
26453
26454 /* Get the firmware version */
26455 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26456@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26457 kfree(flush_buf);
26458 if (return_code != IO_OK)
26459 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26460- h->access.set_intr_mask(h, CCISS_INTR_OFF);
26461+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
26462 free_irq(h->intr[h->intr_mode], h);
26463 }
26464
26465diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26466index 7fda30e..eb5dfe0 100644
26467--- a/drivers/block/cciss.h
26468+++ b/drivers/block/cciss.h
26469@@ -101,7 +101,7 @@ struct ctlr_info
26470 /* information about each logical volume */
26471 drive_info_struct *drv[CISS_MAX_LUN];
26472
26473- struct access_method access;
26474+ struct access_method *access;
26475
26476 /* queue and queue Info */
26477 struct list_head reqQ;
26478diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26479index 9125bbe..eede5c8 100644
26480--- a/drivers/block/cpqarray.c
26481+++ b/drivers/block/cpqarray.c
26482@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26483 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26484 goto Enomem4;
26485 }
26486- hba[i]->access.set_intr_mask(hba[i], 0);
26487+ hba[i]->access->set_intr_mask(hba[i], 0);
26488 if (request_irq(hba[i]->intr, do_ida_intr,
26489 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26490 {
26491@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26492 add_timer(&hba[i]->timer);
26493
26494 /* Enable IRQ now that spinlock and rate limit timer are set up */
26495- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26496+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26497
26498 for(j=0; j<NWD; j++) {
26499 struct gendisk *disk = ida_gendisk[i][j];
26500@@ -694,7 +694,7 @@ DBGINFO(
26501 for(i=0; i<NR_PRODUCTS; i++) {
26502 if (board_id == products[i].board_id) {
26503 c->product_name = products[i].product_name;
26504- c->access = *(products[i].access);
26505+ c->access = products[i].access;
26506 break;
26507 }
26508 }
26509@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26510 hba[ctlr]->intr = intr;
26511 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26512 hba[ctlr]->product_name = products[j].product_name;
26513- hba[ctlr]->access = *(products[j].access);
26514+ hba[ctlr]->access = products[j].access;
26515 hba[ctlr]->ctlr = ctlr;
26516 hba[ctlr]->board_id = board_id;
26517 hba[ctlr]->pci_dev = NULL; /* not PCI */
26518@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
26519
26520 while((c = h->reqQ) != NULL) {
26521 /* Can't do anything if we're busy */
26522- if (h->access.fifo_full(h) == 0)
26523+ if (h->access->fifo_full(h) == 0)
26524 return;
26525
26526 /* Get the first entry from the request Q */
26527@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
26528 h->Qdepth--;
26529
26530 /* Tell the controller to do our bidding */
26531- h->access.submit_command(h, c);
26532+ h->access->submit_command(h, c);
26533
26534 /* Get onto the completion Q */
26535 addQ(&h->cmpQ, c);
26536@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26537 unsigned long flags;
26538 __u32 a,a1;
26539
26540- istat = h->access.intr_pending(h);
26541+ istat = h->access->intr_pending(h);
26542 /* Is this interrupt for us? */
26543 if (istat == 0)
26544 return IRQ_NONE;
26545@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26546 */
26547 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26548 if (istat & FIFO_NOT_EMPTY) {
26549- while((a = h->access.command_completed(h))) {
26550+ while((a = h->access->command_completed(h))) {
26551 a1 = a; a &= ~3;
26552 if ((c = h->cmpQ) == NULL)
26553 {
26554@@ -1449,11 +1449,11 @@ static int sendcmd(
26555 /*
26556 * Disable interrupt
26557 */
26558- info_p->access.set_intr_mask(info_p, 0);
26559+ info_p->access->set_intr_mask(info_p, 0);
26560 /* Make sure there is room in the command FIFO */
26561 /* Actually it should be completely empty at this time. */
26562 for (i = 200000; i > 0; i--) {
26563- temp = info_p->access.fifo_full(info_p);
26564+ temp = info_p->access->fifo_full(info_p);
26565 if (temp != 0) {
26566 break;
26567 }
26568@@ -1466,7 +1466,7 @@ DBG(
26569 /*
26570 * Send the cmd
26571 */
26572- info_p->access.submit_command(info_p, c);
26573+ info_p->access->submit_command(info_p, c);
26574 complete = pollcomplete(ctlr);
26575
26576 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26577@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26578 * we check the new geometry. Then turn interrupts back on when
26579 * we're done.
26580 */
26581- host->access.set_intr_mask(host, 0);
26582+ host->access->set_intr_mask(host, 0);
26583 getgeometry(ctlr);
26584- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26585+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26586
26587 for(i=0; i<NWD; i++) {
26588 struct gendisk *disk = ida_gendisk[ctlr][i];
26589@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
26590 /* Wait (up to 2 seconds) for a command to complete */
26591
26592 for (i = 200000; i > 0; i--) {
26593- done = hba[ctlr]->access.command_completed(hba[ctlr]);
26594+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
26595 if (done == 0) {
26596 udelay(10); /* a short fixed delay */
26597 } else
26598diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26599index be73e9d..7fbf140 100644
26600--- a/drivers/block/cpqarray.h
26601+++ b/drivers/block/cpqarray.h
26602@@ -99,7 +99,7 @@ struct ctlr_info {
26603 drv_info_t drv[NWD];
26604 struct proc_dir_entry *proc;
26605
26606- struct access_method access;
26607+ struct access_method *access;
26608
26609 cmdlist_t *reqQ;
26610 cmdlist_t *cmpQ;
26611diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26612index 9cf2035..bffca95 100644
26613--- a/drivers/block/drbd/drbd_int.h
26614+++ b/drivers/block/drbd/drbd_int.h
26615@@ -736,7 +736,7 @@ struct drbd_request;
26616 struct drbd_epoch {
26617 struct list_head list;
26618 unsigned int barrier_nr;
26619- atomic_t epoch_size; /* increased on every request added. */
26620+ atomic_unchecked_t epoch_size; /* increased on every request added. */
26621 atomic_t active; /* increased on every req. added, and dec on every finished. */
26622 unsigned long flags;
26623 };
26624@@ -1108,7 +1108,7 @@ struct drbd_conf {
26625 void *int_dig_in;
26626 void *int_dig_vv;
26627 wait_queue_head_t seq_wait;
26628- atomic_t packet_seq;
26629+ atomic_unchecked_t packet_seq;
26630 unsigned int peer_seq;
26631 spinlock_t peer_seq_lock;
26632 unsigned int minor;
26633@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26634
26635 static inline void drbd_tcp_cork(struct socket *sock)
26636 {
26637- int __user val = 1;
26638+ int val = 1;
26639 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26640- (char __user *)&val, sizeof(val));
26641+ (char __force_user *)&val, sizeof(val));
26642 }
26643
26644 static inline void drbd_tcp_uncork(struct socket *sock)
26645 {
26646- int __user val = 0;
26647+ int val = 0;
26648 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26649- (char __user *)&val, sizeof(val));
26650+ (char __force_user *)&val, sizeof(val));
26651 }
26652
26653 static inline void drbd_tcp_nodelay(struct socket *sock)
26654 {
26655- int __user val = 1;
26656+ int val = 1;
26657 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26658- (char __user *)&val, sizeof(val));
26659+ (char __force_user *)&val, sizeof(val));
26660 }
26661
26662 static inline void drbd_tcp_quickack(struct socket *sock)
26663 {
26664- int __user val = 2;
26665+ int val = 2;
26666 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26667- (char __user *)&val, sizeof(val));
26668+ (char __force_user *)&val, sizeof(val));
26669 }
26670
26671 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26672diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26673index 0358e55..bc33689 100644
26674--- a/drivers/block/drbd/drbd_main.c
26675+++ b/drivers/block/drbd/drbd_main.c
26676@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26677 p.sector = sector;
26678 p.block_id = block_id;
26679 p.blksize = blksize;
26680- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26681+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26682
26683 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26684 return false;
26685@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26686 p.sector = cpu_to_be64(req->sector);
26687 p.block_id = (unsigned long)req;
26688 p.seq_num = cpu_to_be32(req->seq_num =
26689- atomic_add_return(1, &mdev->packet_seq));
26690+ atomic_add_return_unchecked(1, &mdev->packet_seq));
26691
26692 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26693
26694@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26695 atomic_set(&mdev->unacked_cnt, 0);
26696 atomic_set(&mdev->local_cnt, 0);
26697 atomic_set(&mdev->net_cnt, 0);
26698- atomic_set(&mdev->packet_seq, 0);
26699+ atomic_set_unchecked(&mdev->packet_seq, 0);
26700 atomic_set(&mdev->pp_in_use, 0);
26701 atomic_set(&mdev->pp_in_use_by_net, 0);
26702 atomic_set(&mdev->rs_sect_in, 0);
26703@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26704 mdev->receiver.t_state);
26705
26706 /* no need to lock it, I'm the only thread alive */
26707- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26708- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26709+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26710+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26711 mdev->al_writ_cnt =
26712 mdev->bm_writ_cnt =
26713 mdev->read_cnt =
26714diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26715index af2a250..219c74b 100644
26716--- a/drivers/block/drbd/drbd_nl.c
26717+++ b/drivers/block/drbd/drbd_nl.c
26718@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26719 module_put(THIS_MODULE);
26720 }
26721
26722-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26723+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26724
26725 static unsigned short *
26726 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26727@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26728 cn_reply->id.idx = CN_IDX_DRBD;
26729 cn_reply->id.val = CN_VAL_DRBD;
26730
26731- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26732+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26733 cn_reply->ack = 0; /* not used here. */
26734 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26735 (int)((char *)tl - (char *)reply->tag_list);
26736@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26737 cn_reply->id.idx = CN_IDX_DRBD;
26738 cn_reply->id.val = CN_VAL_DRBD;
26739
26740- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26741+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26742 cn_reply->ack = 0; /* not used here. */
26743 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26744 (int)((char *)tl - (char *)reply->tag_list);
26745@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26746 cn_reply->id.idx = CN_IDX_DRBD;
26747 cn_reply->id.val = CN_VAL_DRBD;
26748
26749- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26750+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26751 cn_reply->ack = 0; // not used here.
26752 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26753 (int)((char*)tl - (char*)reply->tag_list);
26754@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26755 cn_reply->id.idx = CN_IDX_DRBD;
26756 cn_reply->id.val = CN_VAL_DRBD;
26757
26758- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26759+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26760 cn_reply->ack = 0; /* not used here. */
26761 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26762 (int)((char *)tl - (char *)reply->tag_list);
26763diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26764index 43beaca..4a5b1dd 100644
26765--- a/drivers/block/drbd/drbd_receiver.c
26766+++ b/drivers/block/drbd/drbd_receiver.c
26767@@ -894,7 +894,7 @@ retry:
26768 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26769 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26770
26771- atomic_set(&mdev->packet_seq, 0);
26772+ atomic_set_unchecked(&mdev->packet_seq, 0);
26773 mdev->peer_seq = 0;
26774
26775 drbd_thread_start(&mdev->asender);
26776@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26777 do {
26778 next_epoch = NULL;
26779
26780- epoch_size = atomic_read(&epoch->epoch_size);
26781+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26782
26783 switch (ev & ~EV_CLEANUP) {
26784 case EV_PUT:
26785@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26786 rv = FE_DESTROYED;
26787 } else {
26788 epoch->flags = 0;
26789- atomic_set(&epoch->epoch_size, 0);
26790+ atomic_set_unchecked(&epoch->epoch_size, 0);
26791 /* atomic_set(&epoch->active, 0); is already zero */
26792 if (rv == FE_STILL_LIVE)
26793 rv = FE_RECYCLED;
26794@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26795 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26796 drbd_flush(mdev);
26797
26798- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26799+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26800 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26801 if (epoch)
26802 break;
26803 }
26804
26805 epoch = mdev->current_epoch;
26806- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26807+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26808
26809 D_ASSERT(atomic_read(&epoch->active) == 0);
26810 D_ASSERT(epoch->flags == 0);
26811@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26812 }
26813
26814 epoch->flags = 0;
26815- atomic_set(&epoch->epoch_size, 0);
26816+ atomic_set_unchecked(&epoch->epoch_size, 0);
26817 atomic_set(&epoch->active, 0);
26818
26819 spin_lock(&mdev->epoch_lock);
26820- if (atomic_read(&mdev->current_epoch->epoch_size)) {
26821+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26822 list_add(&epoch->list, &mdev->current_epoch->list);
26823 mdev->current_epoch = epoch;
26824 mdev->epochs++;
26825@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26826 spin_unlock(&mdev->peer_seq_lock);
26827
26828 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26829- atomic_inc(&mdev->current_epoch->epoch_size);
26830+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26831 return drbd_drain_block(mdev, data_size);
26832 }
26833
26834@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26835
26836 spin_lock(&mdev->epoch_lock);
26837 e->epoch = mdev->current_epoch;
26838- atomic_inc(&e->epoch->epoch_size);
26839+ atomic_inc_unchecked(&e->epoch->epoch_size);
26840 atomic_inc(&e->epoch->active);
26841 spin_unlock(&mdev->epoch_lock);
26842
26843@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26844 D_ASSERT(list_empty(&mdev->done_ee));
26845
26846 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26847- atomic_set(&mdev->current_epoch->epoch_size, 0);
26848+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26849 D_ASSERT(list_empty(&mdev->current_epoch->list));
26850 }
26851
26852diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26853index 1e888c9..05cf1b0 100644
26854--- a/drivers/block/loop.c
26855+++ b/drivers/block/loop.c
26856@@ -227,7 +227,7 @@ static int __do_lo_send_write(struct file *file,
26857 mm_segment_t old_fs = get_fs();
26858
26859 set_fs(get_ds());
26860- bw = file->f_op->write(file, buf, len, &pos);
26861+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26862 set_fs(old_fs);
26863 if (likely(bw == len))
26864 return 0;
26865diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26866index 4364303..9adf4ee 100644
26867--- a/drivers/char/Kconfig
26868+++ b/drivers/char/Kconfig
26869@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26870
26871 config DEVKMEM
26872 bool "/dev/kmem virtual device support"
26873- default y
26874+ default n
26875+ depends on !GRKERNSEC_KMEM
26876 help
26877 Say Y here if you want to support the /dev/kmem device. The
26878 /dev/kmem device is rarely used, but can be used for certain
26879@@ -596,6 +597,7 @@ config DEVPORT
26880 bool
26881 depends on !M68K
26882 depends on ISA || PCI
26883+ depends on !GRKERNSEC_KMEM
26884 default y
26885
26886 source "drivers/s390/char/Kconfig"
26887diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26888index 2e04433..22afc64 100644
26889--- a/drivers/char/agp/frontend.c
26890+++ b/drivers/char/agp/frontend.c
26891@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26892 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26893 return -EFAULT;
26894
26895- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26896+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26897 return -EFAULT;
26898
26899 client = agp_find_client_by_pid(reserve.pid);
26900diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26901index 095ab90..afad0a4 100644
26902--- a/drivers/char/briq_panel.c
26903+++ b/drivers/char/briq_panel.c
26904@@ -9,6 +9,7 @@
26905 #include <linux/types.h>
26906 #include <linux/errno.h>
26907 #include <linux/tty.h>
26908+#include <linux/mutex.h>
26909 #include <linux/timer.h>
26910 #include <linux/kernel.h>
26911 #include <linux/wait.h>
26912@@ -34,6 +35,7 @@ static int vfd_is_open;
26913 static unsigned char vfd[40];
26914 static int vfd_cursor;
26915 static unsigned char ledpb, led;
26916+static DEFINE_MUTEX(vfd_mutex);
26917
26918 static void update_vfd(void)
26919 {
26920@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26921 if (!vfd_is_open)
26922 return -EBUSY;
26923
26924+ mutex_lock(&vfd_mutex);
26925 for (;;) {
26926 char c;
26927 if (!indx)
26928 break;
26929- if (get_user(c, buf))
26930+ if (get_user(c, buf)) {
26931+ mutex_unlock(&vfd_mutex);
26932 return -EFAULT;
26933+ }
26934 if (esc) {
26935 set_led(c);
26936 esc = 0;
26937@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26938 buf++;
26939 }
26940 update_vfd();
26941+ mutex_unlock(&vfd_mutex);
26942
26943 return len;
26944 }
26945diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26946index f773a9d..65cd683 100644
26947--- a/drivers/char/genrtc.c
26948+++ b/drivers/char/genrtc.c
26949@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26950 switch (cmd) {
26951
26952 case RTC_PLL_GET:
26953+ memset(&pll, 0, sizeof(pll));
26954 if (get_rtc_pll(&pll))
26955 return -EINVAL;
26956 else
26957diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26958index 0833896..cccce52 100644
26959--- a/drivers/char/hpet.c
26960+++ b/drivers/char/hpet.c
26961@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26962 }
26963
26964 static int
26965-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26966+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26967 struct hpet_info *info)
26968 {
26969 struct hpet_timer __iomem *timer;
26970diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26971index 58c0e63..46c16bf 100644
26972--- a/drivers/char/ipmi/ipmi_msghandler.c
26973+++ b/drivers/char/ipmi/ipmi_msghandler.c
26974@@ -415,7 +415,7 @@ struct ipmi_smi {
26975 struct proc_dir_entry *proc_dir;
26976 char proc_dir_name[10];
26977
26978- atomic_t stats[IPMI_NUM_STATS];
26979+ atomic_unchecked_t stats[IPMI_NUM_STATS];
26980
26981 /*
26982 * run_to_completion duplicate of smb_info, smi_info
26983@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26984
26985
26986 #define ipmi_inc_stat(intf, stat) \
26987- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26988+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26989 #define ipmi_get_stat(intf, stat) \
26990- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26991+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26992
26993 static int is_lan_addr(struct ipmi_addr *addr)
26994 {
26995@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26996 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26997 init_waitqueue_head(&intf->waitq);
26998 for (i = 0; i < IPMI_NUM_STATS; i++)
26999- atomic_set(&intf->stats[i], 0);
27000+ atomic_set_unchecked(&intf->stats[i], 0);
27001
27002 intf->proc_dir = NULL;
27003
27004diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
27005index 9397ab4..d01bee1 100644
27006--- a/drivers/char/ipmi/ipmi_si_intf.c
27007+++ b/drivers/char/ipmi/ipmi_si_intf.c
27008@@ -277,7 +277,7 @@ struct smi_info {
27009 unsigned char slave_addr;
27010
27011 /* Counters and things for the proc filesystem. */
27012- atomic_t stats[SI_NUM_STATS];
27013+ atomic_unchecked_t stats[SI_NUM_STATS];
27014
27015 struct task_struct *thread;
27016
27017@@ -286,9 +286,9 @@ struct smi_info {
27018 };
27019
27020 #define smi_inc_stat(smi, stat) \
27021- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27022+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27023 #define smi_get_stat(smi, stat) \
27024- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27025+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27026
27027 #define SI_MAX_PARMS 4
27028
27029@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
27030 atomic_set(&new_smi->req_events, 0);
27031 new_smi->run_to_completion = 0;
27032 for (i = 0; i < SI_NUM_STATS; i++)
27033- atomic_set(&new_smi->stats[i], 0);
27034+ atomic_set_unchecked(&new_smi->stats[i], 0);
27035
27036 new_smi->interrupt_disabled = 1;
27037 atomic_set(&new_smi->stop_operation, 0);
27038diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
27039index 1aeaaba..e018570 100644
27040--- a/drivers/char/mbcs.c
27041+++ b/drivers/char/mbcs.c
27042@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
27043 return 0;
27044 }
27045
27046-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
27047+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27048 {
27049 .part_num = MBCS_PART_NUM,
27050 .mfg_num = MBCS_MFG_NUM,
27051diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27052index 1451790..f705c30 100644
27053--- a/drivers/char/mem.c
27054+++ b/drivers/char/mem.c
27055@@ -18,6 +18,7 @@
27056 #include <linux/raw.h>
27057 #include <linux/tty.h>
27058 #include <linux/capability.h>
27059+#include <linux/security.h>
27060 #include <linux/ptrace.h>
27061 #include <linux/device.h>
27062 #include <linux/highmem.h>
27063@@ -35,6 +36,10 @@
27064 # include <linux/efi.h>
27065 #endif
27066
27067+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27068+extern const struct file_operations grsec_fops;
27069+#endif
27070+
27071 static inline unsigned long size_inside_page(unsigned long start,
27072 unsigned long size)
27073 {
27074@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27075
27076 while (cursor < to) {
27077 if (!devmem_is_allowed(pfn)) {
27078+#ifdef CONFIG_GRKERNSEC_KMEM
27079+ gr_handle_mem_readwrite(from, to);
27080+#else
27081 printk(KERN_INFO
27082 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27083 current->comm, from, to);
27084+#endif
27085 return 0;
27086 }
27087 cursor += PAGE_SIZE;
27088@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27089 }
27090 return 1;
27091 }
27092+#elif defined(CONFIG_GRKERNSEC_KMEM)
27093+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27094+{
27095+ return 0;
27096+}
27097 #else
27098 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27099 {
27100@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27101
27102 while (count > 0) {
27103 unsigned long remaining;
27104+ char *temp;
27105
27106 sz = size_inside_page(p, count);
27107
27108@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27109 if (!ptr)
27110 return -EFAULT;
27111
27112- remaining = copy_to_user(buf, ptr, sz);
27113+#ifdef CONFIG_PAX_USERCOPY
27114+ temp = kmalloc(sz, GFP_KERNEL);
27115+ if (!temp) {
27116+ unxlate_dev_mem_ptr(p, ptr);
27117+ return -ENOMEM;
27118+ }
27119+ memcpy(temp, ptr, sz);
27120+#else
27121+ temp = ptr;
27122+#endif
27123+
27124+ remaining = copy_to_user(buf, temp, sz);
27125+
27126+#ifdef CONFIG_PAX_USERCOPY
27127+ kfree(temp);
27128+#endif
27129+
27130 unxlate_dev_mem_ptr(p, ptr);
27131 if (remaining)
27132 return -EFAULT;
27133@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27134 size_t count, loff_t *ppos)
27135 {
27136 unsigned long p = *ppos;
27137- ssize_t low_count, read, sz;
27138+ ssize_t low_count, read, sz, err = 0;
27139 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27140- int err = 0;
27141
27142 read = 0;
27143 if (p < (unsigned long) high_memory) {
27144@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27145 }
27146 #endif
27147 while (low_count > 0) {
27148+ char *temp;
27149+
27150 sz = size_inside_page(p, low_count);
27151
27152 /*
27153@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27154 */
27155 kbuf = xlate_dev_kmem_ptr((char *)p);
27156
27157- if (copy_to_user(buf, kbuf, sz))
27158+#ifdef CONFIG_PAX_USERCOPY
27159+ temp = kmalloc(sz, GFP_KERNEL);
27160+ if (!temp)
27161+ return -ENOMEM;
27162+ memcpy(temp, kbuf, sz);
27163+#else
27164+ temp = kbuf;
27165+#endif
27166+
27167+ err = copy_to_user(buf, temp, sz);
27168+
27169+#ifdef CONFIG_PAX_USERCOPY
27170+ kfree(temp);
27171+#endif
27172+
27173+ if (err)
27174 return -EFAULT;
27175 buf += sz;
27176 p += sz;
27177@@ -867,6 +914,9 @@ static const struct memdev {
27178 #ifdef CONFIG_CRASH_DUMP
27179 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27180 #endif
27181+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27182+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27183+#endif
27184 };
27185
27186 static int memory_open(struct inode *inode, struct file *filp)
27187diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27188index da3cfee..a5a6606 100644
27189--- a/drivers/char/nvram.c
27190+++ b/drivers/char/nvram.c
27191@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27192
27193 spin_unlock_irq(&rtc_lock);
27194
27195- if (copy_to_user(buf, contents, tmp - contents))
27196+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27197 return -EFAULT;
27198
27199 *ppos = i;
27200diff --git a/drivers/char/random.c b/drivers/char/random.c
27201index 6035ab8..bdfe4fd 100644
27202--- a/drivers/char/random.c
27203+++ b/drivers/char/random.c
27204@@ -261,8 +261,13 @@
27205 /*
27206 * Configuration information
27207 */
27208+#ifdef CONFIG_GRKERNSEC_RANDNET
27209+#define INPUT_POOL_WORDS 512
27210+#define OUTPUT_POOL_WORDS 128
27211+#else
27212 #define INPUT_POOL_WORDS 128
27213 #define OUTPUT_POOL_WORDS 32
27214+#endif
27215 #define SEC_XFER_SIZE 512
27216 #define EXTRACT_SIZE 10
27217
27218@@ -300,10 +305,17 @@ static struct poolinfo {
27219 int poolwords;
27220 int tap1, tap2, tap3, tap4, tap5;
27221 } poolinfo_table[] = {
27222+#ifdef CONFIG_GRKERNSEC_RANDNET
27223+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27224+ { 512, 411, 308, 208, 104, 1 },
27225+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27226+ { 128, 103, 76, 51, 25, 1 },
27227+#else
27228 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27229 { 128, 103, 76, 51, 25, 1 },
27230 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27231 { 32, 26, 20, 14, 7, 1 },
27232+#endif
27233 #if 0
27234 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27235 { 2048, 1638, 1231, 819, 411, 1 },
27236@@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27237
27238 extract_buf(r, tmp);
27239 i = min_t(int, nbytes, EXTRACT_SIZE);
27240- if (copy_to_user(buf, tmp, i)) {
27241+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27242 ret = -EFAULT;
27243 break;
27244 }
27245@@ -1228,7 +1240,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27246 #include <linux/sysctl.h>
27247
27248 static int min_read_thresh = 8, min_write_thresh;
27249-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27250+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27251 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27252 static char sysctl_bootid[16];
27253
27254diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27255index 1ee8ce7..b778bef 100644
27256--- a/drivers/char/sonypi.c
27257+++ b/drivers/char/sonypi.c
27258@@ -55,6 +55,7 @@
27259 #include <asm/uaccess.h>
27260 #include <asm/io.h>
27261 #include <asm/system.h>
27262+#include <asm/local.h>
27263
27264 #include <linux/sonypi.h>
27265
27266@@ -491,7 +492,7 @@ static struct sonypi_device {
27267 spinlock_t fifo_lock;
27268 wait_queue_head_t fifo_proc_list;
27269 struct fasync_struct *fifo_async;
27270- int open_count;
27271+ local_t open_count;
27272 int model;
27273 struct input_dev *input_jog_dev;
27274 struct input_dev *input_key_dev;
27275@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27276 static int sonypi_misc_release(struct inode *inode, struct file *file)
27277 {
27278 mutex_lock(&sonypi_device.lock);
27279- sonypi_device.open_count--;
27280+ local_dec(&sonypi_device.open_count);
27281 mutex_unlock(&sonypi_device.lock);
27282 return 0;
27283 }
27284@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27285 {
27286 mutex_lock(&sonypi_device.lock);
27287 /* Flush input queue on first open */
27288- if (!sonypi_device.open_count)
27289+ if (!local_read(&sonypi_device.open_count))
27290 kfifo_reset(&sonypi_device.fifo);
27291- sonypi_device.open_count++;
27292+ local_inc(&sonypi_device.open_count);
27293 mutex_unlock(&sonypi_device.lock);
27294
27295 return 0;
27296diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27297index 361a1df..2471eee 100644
27298--- a/drivers/char/tpm/tpm.c
27299+++ b/drivers/char/tpm/tpm.c
27300@@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27301 chip->vendor.req_complete_val)
27302 goto out_recv;
27303
27304- if ((status == chip->vendor.req_canceled)) {
27305+ if (status == chip->vendor.req_canceled) {
27306 dev_err(chip->dev, "Operation Canceled\n");
27307 rc = -ECANCELED;
27308 goto out;
27309diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27310index 0636520..169c1d0 100644
27311--- a/drivers/char/tpm/tpm_bios.c
27312+++ b/drivers/char/tpm/tpm_bios.c
27313@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27314 event = addr;
27315
27316 if ((event->event_type == 0 && event->event_size == 0) ||
27317- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27318+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27319 return NULL;
27320
27321 return addr;
27322@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27323 return NULL;
27324
27325 if ((event->event_type == 0 && event->event_size == 0) ||
27326- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27327+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27328 return NULL;
27329
27330 (*pos)++;
27331@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27332 int i;
27333
27334 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27335- seq_putc(m, data[i]);
27336+ if (!seq_putc(m, data[i]))
27337+ return -EFAULT;
27338
27339 return 0;
27340 }
27341@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27342 log->bios_event_log_end = log->bios_event_log + len;
27343
27344 virt = acpi_os_map_memory(start, len);
27345+ if (!virt) {
27346+ kfree(log->bios_event_log);
27347+ log->bios_event_log = NULL;
27348+ return -EFAULT;
27349+ }
27350
27351- memcpy(log->bios_event_log, virt, len);
27352+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27353
27354 acpi_os_unmap_memory(virt, len);
27355 return 0;
27356diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27357index 8e3c46d..c139b99 100644
27358--- a/drivers/char/virtio_console.c
27359+++ b/drivers/char/virtio_console.c
27360@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27361 if (to_user) {
27362 ssize_t ret;
27363
27364- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27365+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27366 if (ret)
27367 return -EFAULT;
27368 } else {
27369@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27370 if (!port_has_data(port) && !port->host_connected)
27371 return 0;
27372
27373- return fill_readbuf(port, ubuf, count, true);
27374+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27375 }
27376
27377 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27378diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
27379index eb1d864..39ee5a7 100644
27380--- a/drivers/dma/dmatest.c
27381+++ b/drivers/dma/dmatest.c
27382@@ -591,7 +591,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
27383 }
27384 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
27385 cnt = dmatest_add_threads(dtc, DMA_PQ);
27386- thread_count += cnt > 0 ?: 0;
27387+ thread_count += cnt > 0 ? cnt : 0;
27388 }
27389
27390 pr_info("dmatest: Started %u threads using %s\n",
27391diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27392index c9eee6d..f9d5280 100644
27393--- a/drivers/edac/amd64_edac.c
27394+++ b/drivers/edac/amd64_edac.c
27395@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27396 * PCI core identifies what devices are on a system during boot, and then
27397 * inquiry this table to see if this driver is for a given device found.
27398 */
27399-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27400+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27401 {
27402 .vendor = PCI_VENDOR_ID_AMD,
27403 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27404diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27405index e47e73b..348e0bd 100644
27406--- a/drivers/edac/amd76x_edac.c
27407+++ b/drivers/edac/amd76x_edac.c
27408@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27409 edac_mc_free(mci);
27410 }
27411
27412-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27413+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27414 {
27415 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27416 AMD762},
27417diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27418index 1af531a..3a8ff27 100644
27419--- a/drivers/edac/e752x_edac.c
27420+++ b/drivers/edac/e752x_edac.c
27421@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27422 edac_mc_free(mci);
27423 }
27424
27425-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27426+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27427 {
27428 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27429 E7520},
27430diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27431index 6ffb6d2..383d8d7 100644
27432--- a/drivers/edac/e7xxx_edac.c
27433+++ b/drivers/edac/e7xxx_edac.c
27434@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27435 edac_mc_free(mci);
27436 }
27437
27438-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27439+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27440 {
27441 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27442 E7205},
27443diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27444index 495198a..ac08c85 100644
27445--- a/drivers/edac/edac_pci_sysfs.c
27446+++ b/drivers/edac/edac_pci_sysfs.c
27447@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27448 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27449 static int edac_pci_poll_msec = 1000; /* one second workq period */
27450
27451-static atomic_t pci_parity_count = ATOMIC_INIT(0);
27452-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27453+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27454+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27455
27456 static struct kobject *edac_pci_top_main_kobj;
27457 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27458@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27459 edac_printk(KERN_CRIT, EDAC_PCI,
27460 "Signaled System Error on %s\n",
27461 pci_name(dev));
27462- atomic_inc(&pci_nonparity_count);
27463+ atomic_inc_unchecked(&pci_nonparity_count);
27464 }
27465
27466 if (status & (PCI_STATUS_PARITY)) {
27467@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27468 "Master Data Parity Error on %s\n",
27469 pci_name(dev));
27470
27471- atomic_inc(&pci_parity_count);
27472+ atomic_inc_unchecked(&pci_parity_count);
27473 }
27474
27475 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27476@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27477 "Detected Parity Error on %s\n",
27478 pci_name(dev));
27479
27480- atomic_inc(&pci_parity_count);
27481+ atomic_inc_unchecked(&pci_parity_count);
27482 }
27483 }
27484
27485@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27486 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27487 "Signaled System Error on %s\n",
27488 pci_name(dev));
27489- atomic_inc(&pci_nonparity_count);
27490+ atomic_inc_unchecked(&pci_nonparity_count);
27491 }
27492
27493 if (status & (PCI_STATUS_PARITY)) {
27494@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27495 "Master Data Parity Error on "
27496 "%s\n", pci_name(dev));
27497
27498- atomic_inc(&pci_parity_count);
27499+ atomic_inc_unchecked(&pci_parity_count);
27500 }
27501
27502 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27503@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27504 "Detected Parity Error on %s\n",
27505 pci_name(dev));
27506
27507- atomic_inc(&pci_parity_count);
27508+ atomic_inc_unchecked(&pci_parity_count);
27509 }
27510 }
27511 }
27512@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27513 if (!check_pci_errors)
27514 return;
27515
27516- before_count = atomic_read(&pci_parity_count);
27517+ before_count = atomic_read_unchecked(&pci_parity_count);
27518
27519 /* scan all PCI devices looking for a Parity Error on devices and
27520 * bridges.
27521@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27522 /* Only if operator has selected panic on PCI Error */
27523 if (edac_pci_get_panic_on_pe()) {
27524 /* If the count is different 'after' from 'before' */
27525- if (before_count != atomic_read(&pci_parity_count))
27526+ if (before_count != atomic_read_unchecked(&pci_parity_count))
27527 panic("EDAC: PCI Parity Error");
27528 }
27529 }
27530diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27531index c0510b3..6e2a954 100644
27532--- a/drivers/edac/i3000_edac.c
27533+++ b/drivers/edac/i3000_edac.c
27534@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27535 edac_mc_free(mci);
27536 }
27537
27538-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27539+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27540 {
27541 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27542 I3000},
27543diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27544index aa08497..7e6822a 100644
27545--- a/drivers/edac/i3200_edac.c
27546+++ b/drivers/edac/i3200_edac.c
27547@@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27548 edac_mc_free(mci);
27549 }
27550
27551-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27552+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27553 {
27554 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27555 I3200},
27556diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27557index 4dc3ac2..67d05a6 100644
27558--- a/drivers/edac/i5000_edac.c
27559+++ b/drivers/edac/i5000_edac.c
27560@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27561 *
27562 * The "E500P" device is the first device supported.
27563 */
27564-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27565+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27566 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27567 .driver_data = I5000P},
27568
27569diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27570index bcbdeec..9886d16 100644
27571--- a/drivers/edac/i5100_edac.c
27572+++ b/drivers/edac/i5100_edac.c
27573@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27574 edac_mc_free(mci);
27575 }
27576
27577-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27578+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27579 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27580 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27581 { 0, }
27582diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27583index 74d6ec34..baff517 100644
27584--- a/drivers/edac/i5400_edac.c
27585+++ b/drivers/edac/i5400_edac.c
27586@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27587 *
27588 * The "E500P" device is the first device supported.
27589 */
27590-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27591+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27592 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27593 {0,} /* 0 terminated list. */
27594 };
27595diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27596index 6104dba..e7ea8e1 100644
27597--- a/drivers/edac/i7300_edac.c
27598+++ b/drivers/edac/i7300_edac.c
27599@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27600 *
27601 * Has only 8086:360c PCI ID
27602 */
27603-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27604+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27605 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27606 {0,} /* 0 terminated list. */
27607 };
27608diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27609index 70ad892..178943c 100644
27610--- a/drivers/edac/i7core_edac.c
27611+++ b/drivers/edac/i7core_edac.c
27612@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
27613 /*
27614 * pci_device_id table for which devices we are looking for
27615 */
27616-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27617+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27618 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27619 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27620 {0,} /* 0 terminated list. */
27621diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27622index 4329d39..f3022ef 100644
27623--- a/drivers/edac/i82443bxgx_edac.c
27624+++ b/drivers/edac/i82443bxgx_edac.c
27625@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27626
27627 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27628
27629-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27630+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27631 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27632 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27633 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27634diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27635index 931a057..fd28340 100644
27636--- a/drivers/edac/i82860_edac.c
27637+++ b/drivers/edac/i82860_edac.c
27638@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27639 edac_mc_free(mci);
27640 }
27641
27642-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27643+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27644 {
27645 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27646 I82860},
27647diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27648index 33864c6..01edc61 100644
27649--- a/drivers/edac/i82875p_edac.c
27650+++ b/drivers/edac/i82875p_edac.c
27651@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27652 edac_mc_free(mci);
27653 }
27654
27655-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27656+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27657 {
27658 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27659 I82875P},
27660diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27661index a5da732..983363b 100644
27662--- a/drivers/edac/i82975x_edac.c
27663+++ b/drivers/edac/i82975x_edac.c
27664@@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27665 edac_mc_free(mci);
27666 }
27667
27668-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27669+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27670 {
27671 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27672 I82975X
27673diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27674index 0106747..0b40417 100644
27675--- a/drivers/edac/mce_amd.h
27676+++ b/drivers/edac/mce_amd.h
27677@@ -83,7 +83,7 @@ struct amd_decoder_ops {
27678 bool (*dc_mce)(u16, u8);
27679 bool (*ic_mce)(u16, u8);
27680 bool (*nb_mce)(u16, u8);
27681-};
27682+} __no_const;
27683
27684 void amd_report_gart_errors(bool);
27685 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
27686diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27687index b153674..ad2ba9b 100644
27688--- a/drivers/edac/r82600_edac.c
27689+++ b/drivers/edac/r82600_edac.c
27690@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27691 edac_mc_free(mci);
27692 }
27693
27694-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27695+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27696 {
27697 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27698 },
27699diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
27700index 7a402bf..af0b211 100644
27701--- a/drivers/edac/sb_edac.c
27702+++ b/drivers/edac/sb_edac.c
27703@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
27704 /*
27705 * pci_device_id table for which devices we are looking for
27706 */
27707-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
27708+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
27709 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
27710 {0,} /* 0 terminated list. */
27711 };
27712diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27713index b6f47de..c5acf3a 100644
27714--- a/drivers/edac/x38_edac.c
27715+++ b/drivers/edac/x38_edac.c
27716@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27717 edac_mc_free(mci);
27718 }
27719
27720-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27721+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27722 {
27723 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27724 X38},
27725diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27726index 85661b0..c784559a 100644
27727--- a/drivers/firewire/core-card.c
27728+++ b/drivers/firewire/core-card.c
27729@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27730
27731 void fw_core_remove_card(struct fw_card *card)
27732 {
27733- struct fw_card_driver dummy_driver = dummy_driver_template;
27734+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
27735
27736 card->driver->update_phy_reg(card, 4,
27737 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27738diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27739index 4799393..37bd3ab 100644
27740--- a/drivers/firewire/core-cdev.c
27741+++ b/drivers/firewire/core-cdev.c
27742@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27743 int ret;
27744
27745 if ((request->channels == 0 && request->bandwidth == 0) ||
27746- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27747- request->bandwidth < 0)
27748+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27749 return -EINVAL;
27750
27751 r = kmalloc(sizeof(*r), GFP_KERNEL);
27752diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27753index 855ab3f..11f4bbd 100644
27754--- a/drivers/firewire/core-transaction.c
27755+++ b/drivers/firewire/core-transaction.c
27756@@ -37,6 +37,7 @@
27757 #include <linux/timer.h>
27758 #include <linux/types.h>
27759 #include <linux/workqueue.h>
27760+#include <linux/sched.h>
27761
27762 #include <asm/byteorder.h>
27763
27764diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27765index b45be57..5fad18b 100644
27766--- a/drivers/firewire/core.h
27767+++ b/drivers/firewire/core.h
27768@@ -101,6 +101,7 @@ struct fw_card_driver {
27769
27770 int (*stop_iso)(struct fw_iso_context *ctx);
27771 };
27772+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27773
27774 void fw_card_initialize(struct fw_card *card,
27775 const struct fw_card_driver *driver, struct device *device);
27776diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27777index 153980b..4b4d046 100644
27778--- a/drivers/firmware/dmi_scan.c
27779+++ b/drivers/firmware/dmi_scan.c
27780@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27781 }
27782 }
27783 else {
27784- /*
27785- * no iounmap() for that ioremap(); it would be a no-op, but
27786- * it's so early in setup that sucker gets confused into doing
27787- * what it shouldn't if we actually call it.
27788- */
27789 p = dmi_ioremap(0xF0000, 0x10000);
27790 if (p == NULL)
27791 goto error;
27792@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27793 if (buf == NULL)
27794 return -1;
27795
27796- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27797+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27798
27799 iounmap(buf);
27800 return 0;
27801diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27802index 98723cb..10ca85b 100644
27803--- a/drivers/gpio/gpio-vr41xx.c
27804+++ b/drivers/gpio/gpio-vr41xx.c
27805@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27806 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27807 maskl, pendl, maskh, pendh);
27808
27809- atomic_inc(&irq_err_count);
27810+ atomic_inc_unchecked(&irq_err_count);
27811
27812 return -EINVAL;
27813 }
27814diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27815index 8323fc3..5c1d755 100644
27816--- a/drivers/gpu/drm/drm_crtc.c
27817+++ b/drivers/gpu/drm/drm_crtc.c
27818@@ -1379,7 +1379,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27819 */
27820 if ((out_resp->count_modes >= mode_count) && mode_count) {
27821 copied = 0;
27822- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27823+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27824 list_for_each_entry(mode, &connector->modes, head) {
27825 drm_crtc_convert_to_umode(&u_mode, mode);
27826 if (copy_to_user(mode_ptr + copied,
27827@@ -1394,8 +1394,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27828
27829 if ((out_resp->count_props >= props_count) && props_count) {
27830 copied = 0;
27831- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27832- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27833+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27834+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27835 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27836 if (connector->property_ids[i] != 0) {
27837 if (put_user(connector->property_ids[i],
27838@@ -1417,7 +1417,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27839
27840 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27841 copied = 0;
27842- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27843+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27844 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27845 if (connector->encoder_ids[i] != 0) {
27846 if (put_user(connector->encoder_ids[i],
27847@@ -1576,7 +1576,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27848 }
27849
27850 for (i = 0; i < crtc_req->count_connectors; i++) {
27851- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27852+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27853 if (get_user(out_id, &set_connectors_ptr[i])) {
27854 ret = -EFAULT;
27855 goto out;
27856@@ -1857,7 +1857,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27857 fb = obj_to_fb(obj);
27858
27859 num_clips = r->num_clips;
27860- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27861+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27862
27863 if (!num_clips != !clips_ptr) {
27864 ret = -EINVAL;
27865@@ -2283,7 +2283,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27866 out_resp->flags = property->flags;
27867
27868 if ((out_resp->count_values >= value_count) && value_count) {
27869- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27870+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27871 for (i = 0; i < value_count; i++) {
27872 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27873 ret = -EFAULT;
27874@@ -2296,7 +2296,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27875 if (property->flags & DRM_MODE_PROP_ENUM) {
27876 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27877 copied = 0;
27878- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27879+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27880 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27881
27882 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27883@@ -2319,7 +2319,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27884 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27885 copied = 0;
27886 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27887- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27888+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27889
27890 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27891 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27892@@ -2380,7 +2380,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27893 struct drm_mode_get_blob *out_resp = data;
27894 struct drm_property_blob *blob;
27895 int ret = 0;
27896- void *blob_ptr;
27897+ void __user *blob_ptr;
27898
27899 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27900 return -EINVAL;
27901@@ -2394,7 +2394,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27902 blob = obj_to_blob(obj);
27903
27904 if (out_resp->length == blob->length) {
27905- blob_ptr = (void *)(unsigned long)out_resp->data;
27906+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
27907 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27908 ret = -EFAULT;
27909 goto done;
27910diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27911index d2619d7..bd6bd00 100644
27912--- a/drivers/gpu/drm/drm_crtc_helper.c
27913+++ b/drivers/gpu/drm/drm_crtc_helper.c
27914@@ -279,7 +279,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27915 struct drm_crtc *tmp;
27916 int crtc_mask = 1;
27917
27918- WARN(!crtc, "checking null crtc?\n");
27919+ BUG_ON(!crtc);
27920
27921 dev = crtc->dev;
27922
27923diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27924index 40c187c..5746164 100644
27925--- a/drivers/gpu/drm/drm_drv.c
27926+++ b/drivers/gpu/drm/drm_drv.c
27927@@ -308,7 +308,7 @@ module_exit(drm_core_exit);
27928 /**
27929 * Copy and IOCTL return string to user space
27930 */
27931-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27932+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27933 {
27934 int len;
27935
27936@@ -387,7 +387,7 @@ long drm_ioctl(struct file *filp,
27937
27938 dev = file_priv->minor->dev;
27939 atomic_inc(&dev->ioctl_count);
27940- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27941+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27942 ++file_priv->ioctl_count;
27943
27944 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27945diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27946index 4911e1d..484c8a3 100644
27947--- a/drivers/gpu/drm/drm_fops.c
27948+++ b/drivers/gpu/drm/drm_fops.c
27949@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
27950 }
27951
27952 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27953- atomic_set(&dev->counts[i], 0);
27954+ atomic_set_unchecked(&dev->counts[i], 0);
27955
27956 dev->sigdata.lock = NULL;
27957
27958@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
27959
27960 retcode = drm_open_helper(inode, filp, dev);
27961 if (!retcode) {
27962- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27963- if (!dev->open_count++)
27964+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27965+ if (local_inc_return(&dev->open_count) == 1)
27966 retcode = drm_setup(dev);
27967 }
27968 if (!retcode) {
27969@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
27970
27971 mutex_lock(&drm_global_mutex);
27972
27973- DRM_DEBUG("open_count = %d\n", dev->open_count);
27974+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27975
27976 if (dev->driver->preclose)
27977 dev->driver->preclose(dev, file_priv);
27978@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
27979 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27980 task_pid_nr(current),
27981 (long)old_encode_dev(file_priv->minor->device),
27982- dev->open_count);
27983+ local_read(&dev->open_count));
27984
27985 /* if the master has gone away we can't do anything with the lock */
27986 if (file_priv->minor->master)
27987@@ -566,8 +566,8 @@ int drm_release(struct inode *inode, struct file *filp)
27988 * End inline drm_release
27989 */
27990
27991- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27992- if (!--dev->open_count) {
27993+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27994+ if (local_dec_and_test(&dev->open_count)) {
27995 if (atomic_read(&dev->ioctl_count)) {
27996 DRM_ERROR("Device busy: %d\n",
27997 atomic_read(&dev->ioctl_count));
27998diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27999index c87dc96..326055d 100644
28000--- a/drivers/gpu/drm/drm_global.c
28001+++ b/drivers/gpu/drm/drm_global.c
28002@@ -36,7 +36,7 @@
28003 struct drm_global_item {
28004 struct mutex mutex;
28005 void *object;
28006- int refcount;
28007+ atomic_t refcount;
28008 };
28009
28010 static struct drm_global_item glob[DRM_GLOBAL_NUM];
28011@@ -49,7 +49,7 @@ void drm_global_init(void)
28012 struct drm_global_item *item = &glob[i];
28013 mutex_init(&item->mutex);
28014 item->object = NULL;
28015- item->refcount = 0;
28016+ atomic_set(&item->refcount, 0);
28017 }
28018 }
28019
28020@@ -59,7 +59,7 @@ void drm_global_release(void)
28021 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28022 struct drm_global_item *item = &glob[i];
28023 BUG_ON(item->object != NULL);
28024- BUG_ON(item->refcount != 0);
28025+ BUG_ON(atomic_read(&item->refcount) != 0);
28026 }
28027 }
28028
28029@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28030 void *object;
28031
28032 mutex_lock(&item->mutex);
28033- if (item->refcount == 0) {
28034+ if (atomic_read(&item->refcount) == 0) {
28035 item->object = kzalloc(ref->size, GFP_KERNEL);
28036 if (unlikely(item->object == NULL)) {
28037 ret = -ENOMEM;
28038@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28039 goto out_err;
28040
28041 }
28042- ++item->refcount;
28043+ atomic_inc(&item->refcount);
28044 ref->object = item->object;
28045 object = item->object;
28046 mutex_unlock(&item->mutex);
28047@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28048 struct drm_global_item *item = &glob[ref->global_type];
28049
28050 mutex_lock(&item->mutex);
28051- BUG_ON(item->refcount == 0);
28052+ BUG_ON(atomic_read(&item->refcount) == 0);
28053 BUG_ON(ref->object != item->object);
28054- if (--item->refcount == 0) {
28055+ if (atomic_dec_and_test(&item->refcount)) {
28056 ref->release(ref);
28057 item->object = NULL;
28058 }
28059diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28060index ab1162d..42587b2 100644
28061--- a/drivers/gpu/drm/drm_info.c
28062+++ b/drivers/gpu/drm/drm_info.c
28063@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28064 struct drm_local_map *map;
28065 struct drm_map_list *r_list;
28066
28067- /* Hardcoded from _DRM_FRAME_BUFFER,
28068- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28069- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28070- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28071+ static const char * const types[] = {
28072+ [_DRM_FRAME_BUFFER] = "FB",
28073+ [_DRM_REGISTERS] = "REG",
28074+ [_DRM_SHM] = "SHM",
28075+ [_DRM_AGP] = "AGP",
28076+ [_DRM_SCATTER_GATHER] = "SG",
28077+ [_DRM_CONSISTENT] = "PCI",
28078+ [_DRM_GEM] = "GEM" };
28079 const char *type;
28080 int i;
28081
28082@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28083 map = r_list->map;
28084 if (!map)
28085 continue;
28086- if (map->type < 0 || map->type > 5)
28087+ if (map->type >= ARRAY_SIZE(types))
28088 type = "??";
28089 else
28090 type = types[map->type];
28091@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28092 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28093 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28094 vma->vm_flags & VM_IO ? 'i' : '-',
28095+#ifdef CONFIG_GRKERNSEC_HIDESYM
28096+ 0);
28097+#else
28098 vma->vm_pgoff);
28099+#endif
28100
28101 #if defined(__i386__)
28102 pgprot = pgprot_val(vma->vm_page_prot);
28103diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28104index ddd70db..40321e6 100644
28105--- a/drivers/gpu/drm/drm_ioc32.c
28106+++ b/drivers/gpu/drm/drm_ioc32.c
28107@@ -456,7 +456,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28108 request = compat_alloc_user_space(nbytes);
28109 if (!access_ok(VERIFY_WRITE, request, nbytes))
28110 return -EFAULT;
28111- list = (struct drm_buf_desc *) (request + 1);
28112+ list = (struct drm_buf_desc __user *) (request + 1);
28113
28114 if (__put_user(count, &request->count)
28115 || __put_user(list, &request->list))
28116@@ -517,7 +517,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28117 request = compat_alloc_user_space(nbytes);
28118 if (!access_ok(VERIFY_WRITE, request, nbytes))
28119 return -EFAULT;
28120- list = (struct drm_buf_pub *) (request + 1);
28121+ list = (struct drm_buf_pub __user *) (request + 1);
28122
28123 if (__put_user(count, &request->count)
28124 || __put_user(list, &request->list))
28125diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28126index 904d7e9..ab88581 100644
28127--- a/drivers/gpu/drm/drm_ioctl.c
28128+++ b/drivers/gpu/drm/drm_ioctl.c
28129@@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28130 stats->data[i].value =
28131 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28132 else
28133- stats->data[i].value = atomic_read(&dev->counts[i]);
28134+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28135 stats->data[i].type = dev->types[i];
28136 }
28137
28138diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28139index 632ae24..244cf4a 100644
28140--- a/drivers/gpu/drm/drm_lock.c
28141+++ b/drivers/gpu/drm/drm_lock.c
28142@@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28143 if (drm_lock_take(&master->lock, lock->context)) {
28144 master->lock.file_priv = file_priv;
28145 master->lock.lock_time = jiffies;
28146- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28147+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28148 break; /* Got lock */
28149 }
28150
28151@@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28152 return -EINVAL;
28153 }
28154
28155- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28156+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28157
28158 if (drm_lock_free(&master->lock, lock->context)) {
28159 /* FIXME: Should really bail out here. */
28160diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28161index 8f371e8..9f85d52 100644
28162--- a/drivers/gpu/drm/i810/i810_dma.c
28163+++ b/drivers/gpu/drm/i810/i810_dma.c
28164@@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28165 dma->buflist[vertex->idx],
28166 vertex->discard, vertex->used);
28167
28168- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28169- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28170+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28171+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28172 sarea_priv->last_enqueue = dev_priv->counter - 1;
28173 sarea_priv->last_dispatch = (int)hw_status[5];
28174
28175@@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28176 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28177 mc->last_render);
28178
28179- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28180- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28181+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28182+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28183 sarea_priv->last_enqueue = dev_priv->counter - 1;
28184 sarea_priv->last_dispatch = (int)hw_status[5];
28185
28186diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28187index c9339f4..f5e1b9d 100644
28188--- a/drivers/gpu/drm/i810/i810_drv.h
28189+++ b/drivers/gpu/drm/i810/i810_drv.h
28190@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28191 int page_flipping;
28192
28193 wait_queue_head_t irq_queue;
28194- atomic_t irq_received;
28195- atomic_t irq_emitted;
28196+ atomic_unchecked_t irq_received;
28197+ atomic_unchecked_t irq_emitted;
28198
28199 int front_offset;
28200 } drm_i810_private_t;
28201diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28202index 004b048..7588eba 100644
28203--- a/drivers/gpu/drm/i915/i915_debugfs.c
28204+++ b/drivers/gpu/drm/i915/i915_debugfs.c
28205@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28206 I915_READ(GTIMR));
28207 }
28208 seq_printf(m, "Interrupts received: %d\n",
28209- atomic_read(&dev_priv->irq_received));
28210+ atomic_read_unchecked(&dev_priv->irq_received));
28211 for (i = 0; i < I915_NUM_RINGS; i++) {
28212 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28213 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28214@@ -1232,7 +1232,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28215 return ret;
28216
28217 if (opregion->header)
28218- seq_write(m, opregion->header, OPREGION_SIZE);
28219+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28220
28221 mutex_unlock(&dev->struct_mutex);
28222
28223diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28224index a9ae374..43c1e9e 100644
28225--- a/drivers/gpu/drm/i915/i915_dma.c
28226+++ b/drivers/gpu/drm/i915/i915_dma.c
28227@@ -1172,7 +1172,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28228 bool can_switch;
28229
28230 spin_lock(&dev->count_lock);
28231- can_switch = (dev->open_count == 0);
28232+ can_switch = (local_read(&dev->open_count) == 0);
28233 spin_unlock(&dev->count_lock);
28234 return can_switch;
28235 }
28236diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28237index 554bef7..d24791c 100644
28238--- a/drivers/gpu/drm/i915/i915_drv.h
28239+++ b/drivers/gpu/drm/i915/i915_drv.h
28240@@ -229,7 +229,7 @@ struct drm_i915_display_funcs {
28241 /* render clock increase/decrease */
28242 /* display clock increase/decrease */
28243 /* pll clock increase/decrease */
28244-};
28245+} __no_const;
28246
28247 struct intel_device_info {
28248 u8 gen;
28249@@ -312,7 +312,7 @@ typedef struct drm_i915_private {
28250 int current_page;
28251 int page_flipping;
28252
28253- atomic_t irq_received;
28254+ atomic_unchecked_t irq_received;
28255
28256 /* protects the irq masks */
28257 spinlock_t irq_lock;
28258@@ -887,7 +887,7 @@ struct drm_i915_gem_object {
28259 * will be page flipped away on the next vblank. When it
28260 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28261 */
28262- atomic_t pending_flip;
28263+ atomic_unchecked_t pending_flip;
28264 };
28265
28266 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28267@@ -1267,7 +1267,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28268 extern void intel_teardown_gmbus(struct drm_device *dev);
28269 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28270 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28271-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28272+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28273 {
28274 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28275 }
28276diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28277index b9da890..cad1d98 100644
28278--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28279+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28280@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28281 i915_gem_clflush_object(obj);
28282
28283 if (obj->base.pending_write_domain)
28284- cd->flips |= atomic_read(&obj->pending_flip);
28285+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28286
28287 /* The actual obj->write_domain will be updated with
28288 * pending_write_domain after we emit the accumulated flush for all
28289@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28290
28291 static int
28292 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28293- int count)
28294+ unsigned int count)
28295 {
28296- int i;
28297+ unsigned int i;
28298
28299 for (i = 0; i < count; i++) {
28300 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28301diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28302index b40004b..7c53a75 100644
28303--- a/drivers/gpu/drm/i915/i915_irq.c
28304+++ b/drivers/gpu/drm/i915/i915_irq.c
28305@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28306 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28307 struct drm_i915_master_private *master_priv;
28308
28309- atomic_inc(&dev_priv->irq_received);
28310+ atomic_inc_unchecked(&dev_priv->irq_received);
28311
28312 /* disable master interrupt before clearing iir */
28313 de_ier = I915_READ(DEIER);
28314@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28315 struct drm_i915_master_private *master_priv;
28316 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28317
28318- atomic_inc(&dev_priv->irq_received);
28319+ atomic_inc_unchecked(&dev_priv->irq_received);
28320
28321 if (IS_GEN6(dev))
28322 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28323@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28324 int ret = IRQ_NONE, pipe;
28325 bool blc_event = false;
28326
28327- atomic_inc(&dev_priv->irq_received);
28328+ atomic_inc_unchecked(&dev_priv->irq_received);
28329
28330 iir = I915_READ(IIR);
28331
28332@@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28333 {
28334 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28335
28336- atomic_set(&dev_priv->irq_received, 0);
28337+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28338
28339 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28340 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28341@@ -1931,7 +1931,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28342 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28343 int pipe;
28344
28345- atomic_set(&dev_priv->irq_received, 0);
28346+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28347
28348 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28349 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28350diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28351index daa5743..c0757a9 100644
28352--- a/drivers/gpu/drm/i915/intel_display.c
28353+++ b/drivers/gpu/drm/i915/intel_display.c
28354@@ -2230,7 +2230,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28355
28356 wait_event(dev_priv->pending_flip_queue,
28357 atomic_read(&dev_priv->mm.wedged) ||
28358- atomic_read(&obj->pending_flip) == 0);
28359+ atomic_read_unchecked(&obj->pending_flip) == 0);
28360
28361 /* Big Hammer, we also need to ensure that any pending
28362 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28363@@ -2851,7 +2851,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28364 obj = to_intel_framebuffer(crtc->fb)->obj;
28365 dev_priv = crtc->dev->dev_private;
28366 wait_event(dev_priv->pending_flip_queue,
28367- atomic_read(&obj->pending_flip) == 0);
28368+ atomic_read_unchecked(&obj->pending_flip) == 0);
28369 }
28370
28371 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28372@@ -6952,7 +6952,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28373
28374 atomic_clear_mask(1 << intel_crtc->plane,
28375 &obj->pending_flip.counter);
28376- if (atomic_read(&obj->pending_flip) == 0)
28377+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
28378 wake_up(&dev_priv->pending_flip_queue);
28379
28380 schedule_work(&work->work);
28381@@ -7242,7 +7242,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28382 /* Block clients from rendering to the new back buffer until
28383 * the flip occurs and the object is no longer visible.
28384 */
28385- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28386+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28387
28388 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28389 if (ret)
28390@@ -7256,7 +7256,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28391 return 0;
28392
28393 cleanup_pending:
28394- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28395+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28396 drm_gem_object_unreference(&work->old_fb_obj->base);
28397 drm_gem_object_unreference(&obj->base);
28398 mutex_unlock(&dev->struct_mutex);
28399diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28400index 54558a0..2d97005 100644
28401--- a/drivers/gpu/drm/mga/mga_drv.h
28402+++ b/drivers/gpu/drm/mga/mga_drv.h
28403@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28404 u32 clear_cmd;
28405 u32 maccess;
28406
28407- atomic_t vbl_received; /**< Number of vblanks received. */
28408+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28409 wait_queue_head_t fence_queue;
28410- atomic_t last_fence_retired;
28411+ atomic_unchecked_t last_fence_retired;
28412 u32 next_fence_to_post;
28413
28414 unsigned int fb_cpp;
28415diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28416index 2581202..f230a8d9 100644
28417--- a/drivers/gpu/drm/mga/mga_irq.c
28418+++ b/drivers/gpu/drm/mga/mga_irq.c
28419@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28420 if (crtc != 0)
28421 return 0;
28422
28423- return atomic_read(&dev_priv->vbl_received);
28424+ return atomic_read_unchecked(&dev_priv->vbl_received);
28425 }
28426
28427
28428@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28429 /* VBLANK interrupt */
28430 if (status & MGA_VLINEPEN) {
28431 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28432- atomic_inc(&dev_priv->vbl_received);
28433+ atomic_inc_unchecked(&dev_priv->vbl_received);
28434 drm_handle_vblank(dev, 0);
28435 handled = 1;
28436 }
28437@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28438 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28439 MGA_WRITE(MGA_PRIMEND, prim_end);
28440
28441- atomic_inc(&dev_priv->last_fence_retired);
28442+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
28443 DRM_WAKEUP(&dev_priv->fence_queue);
28444 handled = 1;
28445 }
28446@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28447 * using fences.
28448 */
28449 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28450- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28451+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28452 - *sequence) <= (1 << 23)));
28453
28454 *sequence = cur_fence;
28455diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28456index 5fc201b..7b032b9 100644
28457--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28458+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28459@@ -201,7 +201,7 @@ struct methods {
28460 const char desc[8];
28461 void (*loadbios)(struct drm_device *, uint8_t *);
28462 const bool rw;
28463-};
28464+} __do_const;
28465
28466 static struct methods shadow_methods[] = {
28467 { "PRAMIN", load_vbios_pramin, true },
28468@@ -5474,7 +5474,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28469 struct bit_table {
28470 const char id;
28471 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28472-};
28473+} __no_const;
28474
28475 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28476
28477diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28478index 4c0be3a..5757582 100644
28479--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28480+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28481@@ -238,7 +238,7 @@ struct nouveau_channel {
28482 struct list_head pending;
28483 uint32_t sequence;
28484 uint32_t sequence_ack;
28485- atomic_t last_sequence_irq;
28486+ atomic_unchecked_t last_sequence_irq;
28487 struct nouveau_vma vma;
28488 } fence;
28489
28490@@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28491 u32 handle, u16 class);
28492 void (*set_tile_region)(struct drm_device *dev, int i);
28493 void (*tlb_flush)(struct drm_device *, int engine);
28494-};
28495+} __no_const;
28496
28497 struct nouveau_instmem_engine {
28498 void *priv;
28499@@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28500 struct nouveau_mc_engine {
28501 int (*init)(struct drm_device *dev);
28502 void (*takedown)(struct drm_device *dev);
28503-};
28504+} __no_const;
28505
28506 struct nouveau_timer_engine {
28507 int (*init)(struct drm_device *dev);
28508 void (*takedown)(struct drm_device *dev);
28509 uint64_t (*read)(struct drm_device *dev);
28510-};
28511+} __no_const;
28512
28513 struct nouveau_fb_engine {
28514 int num_tiles;
28515@@ -558,7 +558,7 @@ struct nouveau_vram_engine {
28516 void (*put)(struct drm_device *, struct nouveau_mem **);
28517
28518 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28519-};
28520+} __no_const;
28521
28522 struct nouveau_engine {
28523 struct nouveau_instmem_engine instmem;
28524@@ -706,7 +706,7 @@ struct drm_nouveau_private {
28525 struct drm_global_reference mem_global_ref;
28526 struct ttm_bo_global_ref bo_global_ref;
28527 struct ttm_bo_device bdev;
28528- atomic_t validate_sequence;
28529+ atomic_unchecked_t validate_sequence;
28530 } ttm;
28531
28532 struct {
28533diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28534index 2f6daae..c9d7b9e 100644
28535--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28536+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28537@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28538 if (USE_REFCNT(dev))
28539 sequence = nvchan_rd32(chan, 0x48);
28540 else
28541- sequence = atomic_read(&chan->fence.last_sequence_irq);
28542+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28543
28544 if (chan->fence.sequence_ack == sequence)
28545 goto out;
28546@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28547 return ret;
28548 }
28549
28550- atomic_set(&chan->fence.last_sequence_irq, 0);
28551+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28552 return 0;
28553 }
28554
28555diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28556index 5f0bc57..eb9fac8 100644
28557--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28558+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28559@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28560 int trycnt = 0;
28561 int ret, i;
28562
28563- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28564+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28565 retry:
28566 if (++trycnt > 100000) {
28567 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28568diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28569index d8831ab..0ba8356 100644
28570--- a/drivers/gpu/drm/nouveau/nouveau_state.c
28571+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28572@@ -542,7 +542,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28573 bool can_switch;
28574
28575 spin_lock(&dev->count_lock);
28576- can_switch = (dev->open_count == 0);
28577+ can_switch = (local_read(&dev->open_count) == 0);
28578 spin_unlock(&dev->count_lock);
28579 return can_switch;
28580 }
28581diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28582index dbdea8e..cd6eeeb 100644
28583--- a/drivers/gpu/drm/nouveau/nv04_graph.c
28584+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28585@@ -554,7 +554,7 @@ static int
28586 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28587 u32 class, u32 mthd, u32 data)
28588 {
28589- atomic_set(&chan->fence.last_sequence_irq, data);
28590+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28591 return 0;
28592 }
28593
28594diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28595index bcac90b..53bfc76 100644
28596--- a/drivers/gpu/drm/r128/r128_cce.c
28597+++ b/drivers/gpu/drm/r128/r128_cce.c
28598@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28599
28600 /* GH: Simple idle check.
28601 */
28602- atomic_set(&dev_priv->idle_count, 0);
28603+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28604
28605 /* We don't support anything other than bus-mastering ring mode,
28606 * but the ring can be in either AGP or PCI space for the ring
28607diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28608index 930c71b..499aded 100644
28609--- a/drivers/gpu/drm/r128/r128_drv.h
28610+++ b/drivers/gpu/drm/r128/r128_drv.h
28611@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28612 int is_pci;
28613 unsigned long cce_buffers_offset;
28614
28615- atomic_t idle_count;
28616+ atomic_unchecked_t idle_count;
28617
28618 int page_flipping;
28619 int current_page;
28620 u32 crtc_offset;
28621 u32 crtc_offset_cntl;
28622
28623- atomic_t vbl_received;
28624+ atomic_unchecked_t vbl_received;
28625
28626 u32 color_fmt;
28627 unsigned int front_offset;
28628diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28629index 429d5a0..7e899ed 100644
28630--- a/drivers/gpu/drm/r128/r128_irq.c
28631+++ b/drivers/gpu/drm/r128/r128_irq.c
28632@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28633 if (crtc != 0)
28634 return 0;
28635
28636- return atomic_read(&dev_priv->vbl_received);
28637+ return atomic_read_unchecked(&dev_priv->vbl_received);
28638 }
28639
28640 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28641@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28642 /* VBLANK interrupt */
28643 if (status & R128_CRTC_VBLANK_INT) {
28644 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28645- atomic_inc(&dev_priv->vbl_received);
28646+ atomic_inc_unchecked(&dev_priv->vbl_received);
28647 drm_handle_vblank(dev, 0);
28648 return IRQ_HANDLED;
28649 }
28650diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28651index a9e33ce..09edd4b 100644
28652--- a/drivers/gpu/drm/r128/r128_state.c
28653+++ b/drivers/gpu/drm/r128/r128_state.c
28654@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28655
28656 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28657 {
28658- if (atomic_read(&dev_priv->idle_count) == 0)
28659+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28660 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28661 else
28662- atomic_set(&dev_priv->idle_count, 0);
28663+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28664 }
28665
28666 #endif
28667diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28668index 5a82b6b..9e69c73 100644
28669--- a/drivers/gpu/drm/radeon/mkregtable.c
28670+++ b/drivers/gpu/drm/radeon/mkregtable.c
28671@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28672 regex_t mask_rex;
28673 regmatch_t match[4];
28674 char buf[1024];
28675- size_t end;
28676+ long end;
28677 int len;
28678 int done = 0;
28679 int r;
28680 unsigned o;
28681 struct offset *offset;
28682 char last_reg_s[10];
28683- int last_reg;
28684+ unsigned long last_reg;
28685
28686 if (regcomp
28687 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28688diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28689index 8227e76..ce0b195 100644
28690--- a/drivers/gpu/drm/radeon/radeon.h
28691+++ b/drivers/gpu/drm/radeon/radeon.h
28692@@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28693 */
28694 struct radeon_fence_driver {
28695 uint32_t scratch_reg;
28696- atomic_t seq;
28697+ atomic_unchecked_t seq;
28698 uint32_t last_seq;
28699 unsigned long last_jiffies;
28700 unsigned long last_timeout;
28701@@ -530,7 +530,7 @@ struct r600_blit_cp_primitives {
28702 int x2, int y2);
28703 void (*draw_auto)(struct radeon_device *rdev);
28704 void (*set_default_state)(struct radeon_device *rdev);
28705-};
28706+} __no_const;
28707
28708 struct r600_blit {
28709 struct mutex mutex;
28710@@ -954,7 +954,7 @@ struct radeon_asic {
28711 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28712 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28713 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28714-};
28715+} __no_const;
28716
28717 /*
28718 * Asic structures
28719diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28720index 9b39145..389b93b 100644
28721--- a/drivers/gpu/drm/radeon/radeon_device.c
28722+++ b/drivers/gpu/drm/radeon/radeon_device.c
28723@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28724 bool can_switch;
28725
28726 spin_lock(&dev->count_lock);
28727- can_switch = (dev->open_count == 0);
28728+ can_switch = (local_read(&dev->open_count) == 0);
28729 spin_unlock(&dev->count_lock);
28730 return can_switch;
28731 }
28732diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28733index a1b59ca..86f2d44 100644
28734--- a/drivers/gpu/drm/radeon/radeon_drv.h
28735+++ b/drivers/gpu/drm/radeon/radeon_drv.h
28736@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28737
28738 /* SW interrupt */
28739 wait_queue_head_t swi_queue;
28740- atomic_t swi_emitted;
28741+ atomic_unchecked_t swi_emitted;
28742 int vblank_crtc;
28743 uint32_t irq_enable_reg;
28744 uint32_t r500_disp_irq_reg;
28745diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28746index 76ec0e9..6feb1a3 100644
28747--- a/drivers/gpu/drm/radeon/radeon_fence.c
28748+++ b/drivers/gpu/drm/radeon/radeon_fence.c
28749@@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28750 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28751 return 0;
28752 }
28753- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28754+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28755 if (!rdev->cp.ready)
28756 /* FIXME: cp is not running assume everythings is done right
28757 * away
28758@@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28759 return r;
28760 }
28761 radeon_fence_write(rdev, 0);
28762- atomic_set(&rdev->fence_drv.seq, 0);
28763+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28764 INIT_LIST_HEAD(&rdev->fence_drv.created);
28765 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28766 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28767diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28768index 48b7cea..342236f 100644
28769--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28770+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28771@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28772 request = compat_alloc_user_space(sizeof(*request));
28773 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28774 || __put_user(req32.param, &request->param)
28775- || __put_user((void __user *)(unsigned long)req32.value,
28776+ || __put_user((unsigned long)req32.value,
28777 &request->value))
28778 return -EFAULT;
28779
28780diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28781index 00da384..32f972d 100644
28782--- a/drivers/gpu/drm/radeon/radeon_irq.c
28783+++ b/drivers/gpu/drm/radeon/radeon_irq.c
28784@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28785 unsigned int ret;
28786 RING_LOCALS;
28787
28788- atomic_inc(&dev_priv->swi_emitted);
28789- ret = atomic_read(&dev_priv->swi_emitted);
28790+ atomic_inc_unchecked(&dev_priv->swi_emitted);
28791+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28792
28793 BEGIN_RING(4);
28794 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28795@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28796 drm_radeon_private_t *dev_priv =
28797 (drm_radeon_private_t *) dev->dev_private;
28798
28799- atomic_set(&dev_priv->swi_emitted, 0);
28800+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28801 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28802
28803 dev->max_vblank_count = 0x001fffff;
28804diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28805index e8422ae..d22d4a8 100644
28806--- a/drivers/gpu/drm/radeon/radeon_state.c
28807+++ b/drivers/gpu/drm/radeon/radeon_state.c
28808@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28809 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28810 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28811
28812- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28813+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28814 sarea_priv->nbox * sizeof(depth_boxes[0])))
28815 return -EFAULT;
28816
28817@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28818 {
28819 drm_radeon_private_t *dev_priv = dev->dev_private;
28820 drm_radeon_getparam_t *param = data;
28821- int value;
28822+ int value = 0;
28823
28824 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28825
28826diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28827index 0b5468b..9c4b308 100644
28828--- a/drivers/gpu/drm/radeon/radeon_ttm.c
28829+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28830@@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28831 }
28832 if (unlikely(ttm_vm_ops == NULL)) {
28833 ttm_vm_ops = vma->vm_ops;
28834- radeon_ttm_vm_ops = *ttm_vm_ops;
28835- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28836+ pax_open_kernel();
28837+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28838+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28839+ pax_close_kernel();
28840 }
28841 vma->vm_ops = &radeon_ttm_vm_ops;
28842 return 0;
28843diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28844index a9049ed..501f284 100644
28845--- a/drivers/gpu/drm/radeon/rs690.c
28846+++ b/drivers/gpu/drm/radeon/rs690.c
28847@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28848 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28849 rdev->pm.sideport_bandwidth.full)
28850 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28851- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28852+ read_delay_latency.full = dfixed_const(800 * 1000);
28853 read_delay_latency.full = dfixed_div(read_delay_latency,
28854 rdev->pm.igp_sideport_mclk);
28855+ a.full = dfixed_const(370);
28856+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28857 } else {
28858 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28859 rdev->pm.k8_bandwidth.full)
28860diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28861index 727e93d..1565650 100644
28862--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28863+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28864@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28865 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28866 struct shrink_control *sc)
28867 {
28868- static atomic_t start_pool = ATOMIC_INIT(0);
28869+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28870 unsigned i;
28871- unsigned pool_offset = atomic_add_return(1, &start_pool);
28872+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28873 struct ttm_page_pool *pool;
28874 int shrink_pages = sc->nr_to_scan;
28875
28876diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28877index 9cf87d9..2000b7d 100644
28878--- a/drivers/gpu/drm/via/via_drv.h
28879+++ b/drivers/gpu/drm/via/via_drv.h
28880@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28881 typedef uint32_t maskarray_t[5];
28882
28883 typedef struct drm_via_irq {
28884- atomic_t irq_received;
28885+ atomic_unchecked_t irq_received;
28886 uint32_t pending_mask;
28887 uint32_t enable_mask;
28888 wait_queue_head_t irq_queue;
28889@@ -75,7 +75,7 @@ typedef struct drm_via_private {
28890 struct timeval last_vblank;
28891 int last_vblank_valid;
28892 unsigned usec_per_vblank;
28893- atomic_t vbl_received;
28894+ atomic_unchecked_t vbl_received;
28895 drm_via_state_t hc_state;
28896 char pci_buf[VIA_PCI_BUF_SIZE];
28897 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28898diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28899index d391f48..10c8ca3 100644
28900--- a/drivers/gpu/drm/via/via_irq.c
28901+++ b/drivers/gpu/drm/via/via_irq.c
28902@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28903 if (crtc != 0)
28904 return 0;
28905
28906- return atomic_read(&dev_priv->vbl_received);
28907+ return atomic_read_unchecked(&dev_priv->vbl_received);
28908 }
28909
28910 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28911@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28912
28913 status = VIA_READ(VIA_REG_INTERRUPT);
28914 if (status & VIA_IRQ_VBLANK_PENDING) {
28915- atomic_inc(&dev_priv->vbl_received);
28916- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28917+ atomic_inc_unchecked(&dev_priv->vbl_received);
28918+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28919 do_gettimeofday(&cur_vblank);
28920 if (dev_priv->last_vblank_valid) {
28921 dev_priv->usec_per_vblank =
28922@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28923 dev_priv->last_vblank = cur_vblank;
28924 dev_priv->last_vblank_valid = 1;
28925 }
28926- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28927+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28928 DRM_DEBUG("US per vblank is: %u\n",
28929 dev_priv->usec_per_vblank);
28930 }
28931@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28932
28933 for (i = 0; i < dev_priv->num_irqs; ++i) {
28934 if (status & cur_irq->pending_mask) {
28935- atomic_inc(&cur_irq->irq_received);
28936+ atomic_inc_unchecked(&cur_irq->irq_received);
28937 DRM_WAKEUP(&cur_irq->irq_queue);
28938 handled = 1;
28939 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28940@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28941 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28942 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28943 masks[irq][4]));
28944- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28945+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28946 } else {
28947 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28948 (((cur_irq_sequence =
28949- atomic_read(&cur_irq->irq_received)) -
28950+ atomic_read_unchecked(&cur_irq->irq_received)) -
28951 *sequence) <= (1 << 23)));
28952 }
28953 *sequence = cur_irq_sequence;
28954@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28955 }
28956
28957 for (i = 0; i < dev_priv->num_irqs; ++i) {
28958- atomic_set(&cur_irq->irq_received, 0);
28959+ atomic_set_unchecked(&cur_irq->irq_received, 0);
28960 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28961 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28962 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28963@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28964 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28965 case VIA_IRQ_RELATIVE:
28966 irqwait->request.sequence +=
28967- atomic_read(&cur_irq->irq_received);
28968+ atomic_read_unchecked(&cur_irq->irq_received);
28969 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28970 case VIA_IRQ_ABSOLUTE:
28971 break;
28972diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28973index dc27970..f18b008 100644
28974--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28975+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28976@@ -260,7 +260,7 @@ struct vmw_private {
28977 * Fencing and IRQs.
28978 */
28979
28980- atomic_t marker_seq;
28981+ atomic_unchecked_t marker_seq;
28982 wait_queue_head_t fence_queue;
28983 wait_queue_head_t fifo_queue;
28984 int fence_queue_waiters; /* Protected by hw_mutex */
28985diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28986index a0c2f12..68ae6cb 100644
28987--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28988+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28989@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
28990 (unsigned int) min,
28991 (unsigned int) fifo->capabilities);
28992
28993- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28994+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
28995 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
28996 vmw_marker_queue_init(&fifo->marker_queue);
28997 return vmw_fifo_send_fence(dev_priv, &dummy);
28998@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
28999 if (reserveable)
29000 iowrite32(bytes, fifo_mem +
29001 SVGA_FIFO_RESERVED);
29002- return fifo_mem + (next_cmd >> 2);
29003+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29004 } else {
29005 need_bounce = true;
29006 }
29007@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29008
29009 fm = vmw_fifo_reserve(dev_priv, bytes);
29010 if (unlikely(fm == NULL)) {
29011- *seqno = atomic_read(&dev_priv->marker_seq);
29012+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29013 ret = -ENOMEM;
29014 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
29015 false, 3*HZ);
29016@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
29017 }
29018
29019 do {
29020- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
29021+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
29022 } while (*seqno == 0);
29023
29024 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29025diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29026index cabc95f..14b3d77 100644
29027--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29028+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29029@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
29030 * emitted. Then the fence is stale and signaled.
29031 */
29032
29033- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
29034+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
29035 > VMW_FENCE_WRAP);
29036
29037 return ret;
29038@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29039
29040 if (fifo_idle)
29041 down_read(&fifo_state->rwsem);
29042- signal_seq = atomic_read(&dev_priv->marker_seq);
29043+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
29044 ret = 0;
29045
29046 for (;;) {
29047diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29048index 8a8725c..afed796 100644
29049--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29050+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
29051@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29052 while (!vmw_lag_lt(queue, us)) {
29053 spin_lock(&queue->lock);
29054 if (list_empty(&queue->head))
29055- seqno = atomic_read(&dev_priv->marker_seq);
29056+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
29057 else {
29058 marker = list_first_entry(&queue->head,
29059 struct vmw_marker, head);
29060diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29061index bb656d8..4169fca 100644
29062--- a/drivers/hid/hid-core.c
29063+++ b/drivers/hid/hid-core.c
29064@@ -2012,7 +2012,7 @@ static bool hid_ignore(struct hid_device *hdev)
29065
29066 int hid_add_device(struct hid_device *hdev)
29067 {
29068- static atomic_t id = ATOMIC_INIT(0);
29069+ static atomic_unchecked_t id = ATOMIC_INIT(0);
29070 int ret;
29071
29072 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29073@@ -2027,7 +2027,7 @@ int hid_add_device(struct hid_device *hdev)
29074 /* XXX hack, any other cleaner solution after the driver core
29075 * is converted to allow more than 20 bytes as the device name? */
29076 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29077- hdev->vendor, hdev->product, atomic_inc_return(&id));
29078+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29079
29080 hid_debug_register(hdev, dev_name(&hdev->dev));
29081 ret = device_add(&hdev->dev);
29082diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29083index 4ef02b2..8a96831 100644
29084--- a/drivers/hid/usbhid/hiddev.c
29085+++ b/drivers/hid/usbhid/hiddev.c
29086@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29087 break;
29088
29089 case HIDIOCAPPLICATION:
29090- if (arg < 0 || arg >= hid->maxapplication)
29091+ if (arg >= hid->maxapplication)
29092 break;
29093
29094 for (i = 0; i < hid->maxcollection; i++)
29095diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
29096index 4065374..10ed7dc 100644
29097--- a/drivers/hv/channel.c
29098+++ b/drivers/hv/channel.c
29099@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
29100 int ret = 0;
29101 int t;
29102
29103- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
29104- atomic_inc(&vmbus_connection.next_gpadl_handle);
29105+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
29106+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
29107
29108 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
29109 if (ret)
29110diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
29111index 0fb100e..baf87e5 100644
29112--- a/drivers/hv/hv.c
29113+++ b/drivers/hv/hv.c
29114@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
29115 u64 output_address = (output) ? virt_to_phys(output) : 0;
29116 u32 output_address_hi = output_address >> 32;
29117 u32 output_address_lo = output_address & 0xFFFFFFFF;
29118- void *hypercall_page = hv_context.hypercall_page;
29119+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
29120
29121 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
29122 "=a"(hv_status_lo) : "d" (control_hi),
29123diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
29124index 0aee112..b72d21f 100644
29125--- a/drivers/hv/hyperv_vmbus.h
29126+++ b/drivers/hv/hyperv_vmbus.h
29127@@ -556,7 +556,7 @@ enum vmbus_connect_state {
29128 struct vmbus_connection {
29129 enum vmbus_connect_state conn_state;
29130
29131- atomic_t next_gpadl_handle;
29132+ atomic_unchecked_t next_gpadl_handle;
29133
29134 /*
29135 * Represents channel interrupts. Each bit position represents a
29136diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
29137index d2d0a2a..90b8f4d 100644
29138--- a/drivers/hv/vmbus_drv.c
29139+++ b/drivers/hv/vmbus_drv.c
29140@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
29141 {
29142 int ret = 0;
29143
29144- static atomic_t device_num = ATOMIC_INIT(0);
29145+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
29146
29147 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
29148- atomic_inc_return(&device_num));
29149+ atomic_inc_return_unchecked(&device_num));
29150
29151 child_device_obj->device.bus = &hv_bus;
29152 child_device_obj->device.parent = &hv_acpi_dev->dev;
29153diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29154index 66f6729..2d6de0a 100644
29155--- a/drivers/hwmon/acpi_power_meter.c
29156+++ b/drivers/hwmon/acpi_power_meter.c
29157@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29158 return res;
29159
29160 temp /= 1000;
29161- if (temp < 0)
29162- return -EINVAL;
29163
29164 mutex_lock(&resource->lock);
29165 resource->trip[attr->index - 7] = temp;
29166diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29167index fe4104c..346febb 100644
29168--- a/drivers/hwmon/sht15.c
29169+++ b/drivers/hwmon/sht15.c
29170@@ -166,7 +166,7 @@ struct sht15_data {
29171 int supply_uV;
29172 bool supply_uV_valid;
29173 struct work_struct update_supply_work;
29174- atomic_t interrupt_handled;
29175+ atomic_unchecked_t interrupt_handled;
29176 };
29177
29178 /**
29179@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29180 return ret;
29181
29182 gpio_direction_input(data->pdata->gpio_data);
29183- atomic_set(&data->interrupt_handled, 0);
29184+ atomic_set_unchecked(&data->interrupt_handled, 0);
29185
29186 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29187 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29188 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29189 /* Only relevant if the interrupt hasn't occurred. */
29190- if (!atomic_read(&data->interrupt_handled))
29191+ if (!atomic_read_unchecked(&data->interrupt_handled))
29192 schedule_work(&data->read_work);
29193 }
29194 ret = wait_event_timeout(data->wait_queue,
29195@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29196
29197 /* First disable the interrupt */
29198 disable_irq_nosync(irq);
29199- atomic_inc(&data->interrupt_handled);
29200+ atomic_inc_unchecked(&data->interrupt_handled);
29201 /* Then schedule a reading work struct */
29202 if (data->state != SHT15_READING_NOTHING)
29203 schedule_work(&data->read_work);
29204@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29205 * If not, then start the interrupt again - care here as could
29206 * have gone low in meantime so verify it hasn't!
29207 */
29208- atomic_set(&data->interrupt_handled, 0);
29209+ atomic_set_unchecked(&data->interrupt_handled, 0);
29210 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29211 /* If still not occurred or another handler has been scheduled */
29212 if (gpio_get_value(data->pdata->gpio_data)
29213- || atomic_read(&data->interrupt_handled))
29214+ || atomic_read_unchecked(&data->interrupt_handled))
29215 return;
29216 }
29217
29218diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29219index 378fcb5..5e91fa8 100644
29220--- a/drivers/i2c/busses/i2c-amd756-s4882.c
29221+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29222@@ -43,7 +43,7 @@
29223 extern struct i2c_adapter amd756_smbus;
29224
29225 static struct i2c_adapter *s4882_adapter;
29226-static struct i2c_algorithm *s4882_algo;
29227+static i2c_algorithm_no_const *s4882_algo;
29228
29229 /* Wrapper access functions for multiplexed SMBus */
29230 static DEFINE_MUTEX(amd756_lock);
29231diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29232index 29015eb..af2d8e9 100644
29233--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29234+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29235@@ -41,7 +41,7 @@
29236 extern struct i2c_adapter *nforce2_smbus;
29237
29238 static struct i2c_adapter *s4985_adapter;
29239-static struct i2c_algorithm *s4985_algo;
29240+static i2c_algorithm_no_const *s4985_algo;
29241
29242 /* Wrapper access functions for multiplexed SMBus */
29243 static DEFINE_MUTEX(nforce2_lock);
29244diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29245index d7a4833..7fae376 100644
29246--- a/drivers/i2c/i2c-mux.c
29247+++ b/drivers/i2c/i2c-mux.c
29248@@ -28,7 +28,7 @@
29249 /* multiplexer per channel data */
29250 struct i2c_mux_priv {
29251 struct i2c_adapter adap;
29252- struct i2c_algorithm algo;
29253+ i2c_algorithm_no_const algo;
29254
29255 struct i2c_adapter *parent;
29256 void *mux_dev; /* the mux chip/device */
29257diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29258index 57d00ca..0145194 100644
29259--- a/drivers/ide/aec62xx.c
29260+++ b/drivers/ide/aec62xx.c
29261@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29262 .cable_detect = atp86x_cable_detect,
29263 };
29264
29265-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29266+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29267 { /* 0: AEC6210 */
29268 .name = DRV_NAME,
29269 .init_chipset = init_chipset_aec62xx,
29270diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29271index 2c8016a..911a27c 100644
29272--- a/drivers/ide/alim15x3.c
29273+++ b/drivers/ide/alim15x3.c
29274@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29275 .dma_sff_read_status = ide_dma_sff_read_status,
29276 };
29277
29278-static const struct ide_port_info ali15x3_chipset __devinitdata = {
29279+static const struct ide_port_info ali15x3_chipset __devinitconst = {
29280 .name = DRV_NAME,
29281 .init_chipset = init_chipset_ali15x3,
29282 .init_hwif = init_hwif_ali15x3,
29283diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29284index 3747b25..56fc995 100644
29285--- a/drivers/ide/amd74xx.c
29286+++ b/drivers/ide/amd74xx.c
29287@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29288 .udma_mask = udma, \
29289 }
29290
29291-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29292+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29293 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29294 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29295 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29296diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29297index 15f0ead..cb43480 100644
29298--- a/drivers/ide/atiixp.c
29299+++ b/drivers/ide/atiixp.c
29300@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29301 .cable_detect = atiixp_cable_detect,
29302 };
29303
29304-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29305+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29306 { /* 0: IXP200/300/400/700 */
29307 .name = DRV_NAME,
29308 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29309diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29310index 5f80312..d1fc438 100644
29311--- a/drivers/ide/cmd64x.c
29312+++ b/drivers/ide/cmd64x.c
29313@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29314 .dma_sff_read_status = ide_dma_sff_read_status,
29315 };
29316
29317-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29318+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29319 { /* 0: CMD643 */
29320 .name = DRV_NAME,
29321 .init_chipset = init_chipset_cmd64x,
29322diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29323index 2c1e5f7..1444762 100644
29324--- a/drivers/ide/cs5520.c
29325+++ b/drivers/ide/cs5520.c
29326@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29327 .set_dma_mode = cs5520_set_dma_mode,
29328 };
29329
29330-static const struct ide_port_info cyrix_chipset __devinitdata = {
29331+static const struct ide_port_info cyrix_chipset __devinitconst = {
29332 .name = DRV_NAME,
29333 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29334 .port_ops = &cs5520_port_ops,
29335diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29336index 4dc4eb9..49b40ad 100644
29337--- a/drivers/ide/cs5530.c
29338+++ b/drivers/ide/cs5530.c
29339@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29340 .udma_filter = cs5530_udma_filter,
29341 };
29342
29343-static const struct ide_port_info cs5530_chipset __devinitdata = {
29344+static const struct ide_port_info cs5530_chipset __devinitconst = {
29345 .name = DRV_NAME,
29346 .init_chipset = init_chipset_cs5530,
29347 .init_hwif = init_hwif_cs5530,
29348diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29349index 5059faf..18d4c85 100644
29350--- a/drivers/ide/cs5535.c
29351+++ b/drivers/ide/cs5535.c
29352@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29353 .cable_detect = cs5535_cable_detect,
29354 };
29355
29356-static const struct ide_port_info cs5535_chipset __devinitdata = {
29357+static const struct ide_port_info cs5535_chipset __devinitconst = {
29358 .name = DRV_NAME,
29359 .port_ops = &cs5535_port_ops,
29360 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29361diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29362index 847553f..3ffb49d 100644
29363--- a/drivers/ide/cy82c693.c
29364+++ b/drivers/ide/cy82c693.c
29365@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29366 .set_dma_mode = cy82c693_set_dma_mode,
29367 };
29368
29369-static const struct ide_port_info cy82c693_chipset __devinitdata = {
29370+static const struct ide_port_info cy82c693_chipset __devinitconst = {
29371 .name = DRV_NAME,
29372 .init_iops = init_iops_cy82c693,
29373 .port_ops = &cy82c693_port_ops,
29374diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29375index 58c51cd..4aec3b8 100644
29376--- a/drivers/ide/hpt366.c
29377+++ b/drivers/ide/hpt366.c
29378@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29379 }
29380 };
29381
29382-static const struct hpt_info hpt36x __devinitdata = {
29383+static const struct hpt_info hpt36x __devinitconst = {
29384 .chip_name = "HPT36x",
29385 .chip_type = HPT36x,
29386 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29387@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29388 .timings = &hpt36x_timings
29389 };
29390
29391-static const struct hpt_info hpt370 __devinitdata = {
29392+static const struct hpt_info hpt370 __devinitconst = {
29393 .chip_name = "HPT370",
29394 .chip_type = HPT370,
29395 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29396@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29397 .timings = &hpt37x_timings
29398 };
29399
29400-static const struct hpt_info hpt370a __devinitdata = {
29401+static const struct hpt_info hpt370a __devinitconst = {
29402 .chip_name = "HPT370A",
29403 .chip_type = HPT370A,
29404 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29405@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29406 .timings = &hpt37x_timings
29407 };
29408
29409-static const struct hpt_info hpt374 __devinitdata = {
29410+static const struct hpt_info hpt374 __devinitconst = {
29411 .chip_name = "HPT374",
29412 .chip_type = HPT374,
29413 .udma_mask = ATA_UDMA5,
29414@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29415 .timings = &hpt37x_timings
29416 };
29417
29418-static const struct hpt_info hpt372 __devinitdata = {
29419+static const struct hpt_info hpt372 __devinitconst = {
29420 .chip_name = "HPT372",
29421 .chip_type = HPT372,
29422 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29423@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29424 .timings = &hpt37x_timings
29425 };
29426
29427-static const struct hpt_info hpt372a __devinitdata = {
29428+static const struct hpt_info hpt372a __devinitconst = {
29429 .chip_name = "HPT372A",
29430 .chip_type = HPT372A,
29431 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29432@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29433 .timings = &hpt37x_timings
29434 };
29435
29436-static const struct hpt_info hpt302 __devinitdata = {
29437+static const struct hpt_info hpt302 __devinitconst = {
29438 .chip_name = "HPT302",
29439 .chip_type = HPT302,
29440 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29441@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29442 .timings = &hpt37x_timings
29443 };
29444
29445-static const struct hpt_info hpt371 __devinitdata = {
29446+static const struct hpt_info hpt371 __devinitconst = {
29447 .chip_name = "HPT371",
29448 .chip_type = HPT371,
29449 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29450@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29451 .timings = &hpt37x_timings
29452 };
29453
29454-static const struct hpt_info hpt372n __devinitdata = {
29455+static const struct hpt_info hpt372n __devinitconst = {
29456 .chip_name = "HPT372N",
29457 .chip_type = HPT372N,
29458 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29459@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29460 .timings = &hpt37x_timings
29461 };
29462
29463-static const struct hpt_info hpt302n __devinitdata = {
29464+static const struct hpt_info hpt302n __devinitconst = {
29465 .chip_name = "HPT302N",
29466 .chip_type = HPT302N,
29467 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29468@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29469 .timings = &hpt37x_timings
29470 };
29471
29472-static const struct hpt_info hpt371n __devinitdata = {
29473+static const struct hpt_info hpt371n __devinitconst = {
29474 .chip_name = "HPT371N",
29475 .chip_type = HPT371N,
29476 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29477@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29478 .dma_sff_read_status = ide_dma_sff_read_status,
29479 };
29480
29481-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29482+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29483 { /* 0: HPT36x */
29484 .name = DRV_NAME,
29485 .init_chipset = init_chipset_hpt366,
29486diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29487index 8126824..55a2798 100644
29488--- a/drivers/ide/ide-cd.c
29489+++ b/drivers/ide/ide-cd.c
29490@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29491 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29492 if ((unsigned long)buf & alignment
29493 || blk_rq_bytes(rq) & q->dma_pad_mask
29494- || object_is_on_stack(buf))
29495+ || object_starts_on_stack(buf))
29496 drive->dma = 0;
29497 }
29498 }
29499diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29500index a743e68..1cfd674 100644
29501--- a/drivers/ide/ide-pci-generic.c
29502+++ b/drivers/ide/ide-pci-generic.c
29503@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29504 .udma_mask = ATA_UDMA6, \
29505 }
29506
29507-static const struct ide_port_info generic_chipsets[] __devinitdata = {
29508+static const struct ide_port_info generic_chipsets[] __devinitconst = {
29509 /* 0: Unknown */
29510 DECLARE_GENERIC_PCI_DEV(0),
29511
29512diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29513index 560e66d..d5dd180 100644
29514--- a/drivers/ide/it8172.c
29515+++ b/drivers/ide/it8172.c
29516@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29517 .set_dma_mode = it8172_set_dma_mode,
29518 };
29519
29520-static const struct ide_port_info it8172_port_info __devinitdata = {
29521+static const struct ide_port_info it8172_port_info __devinitconst = {
29522 .name = DRV_NAME,
29523 .port_ops = &it8172_port_ops,
29524 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29525diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29526index 46816ba..1847aeb 100644
29527--- a/drivers/ide/it8213.c
29528+++ b/drivers/ide/it8213.c
29529@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29530 .cable_detect = it8213_cable_detect,
29531 };
29532
29533-static const struct ide_port_info it8213_chipset __devinitdata = {
29534+static const struct ide_port_info it8213_chipset __devinitconst = {
29535 .name = DRV_NAME,
29536 .enablebits = { {0x41, 0x80, 0x80} },
29537 .port_ops = &it8213_port_ops,
29538diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29539index 2e3169f..c5611db 100644
29540--- a/drivers/ide/it821x.c
29541+++ b/drivers/ide/it821x.c
29542@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29543 .cable_detect = it821x_cable_detect,
29544 };
29545
29546-static const struct ide_port_info it821x_chipset __devinitdata = {
29547+static const struct ide_port_info it821x_chipset __devinitconst = {
29548 .name = DRV_NAME,
29549 .init_chipset = init_chipset_it821x,
29550 .init_hwif = init_hwif_it821x,
29551diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29552index 74c2c4a..efddd7d 100644
29553--- a/drivers/ide/jmicron.c
29554+++ b/drivers/ide/jmicron.c
29555@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29556 .cable_detect = jmicron_cable_detect,
29557 };
29558
29559-static const struct ide_port_info jmicron_chipset __devinitdata = {
29560+static const struct ide_port_info jmicron_chipset __devinitconst = {
29561 .name = DRV_NAME,
29562 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29563 .port_ops = &jmicron_port_ops,
29564diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29565index 95327a2..73f78d8 100644
29566--- a/drivers/ide/ns87415.c
29567+++ b/drivers/ide/ns87415.c
29568@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29569 .dma_sff_read_status = superio_dma_sff_read_status,
29570 };
29571
29572-static const struct ide_port_info ns87415_chipset __devinitdata = {
29573+static const struct ide_port_info ns87415_chipset __devinitconst = {
29574 .name = DRV_NAME,
29575 .init_hwif = init_hwif_ns87415,
29576 .tp_ops = &ns87415_tp_ops,
29577diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29578index 1a53a4c..39edc66 100644
29579--- a/drivers/ide/opti621.c
29580+++ b/drivers/ide/opti621.c
29581@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29582 .set_pio_mode = opti621_set_pio_mode,
29583 };
29584
29585-static const struct ide_port_info opti621_chipset __devinitdata = {
29586+static const struct ide_port_info opti621_chipset __devinitconst = {
29587 .name = DRV_NAME,
29588 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29589 .port_ops = &opti621_port_ops,
29590diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29591index 9546fe2..2e5ceb6 100644
29592--- a/drivers/ide/pdc202xx_new.c
29593+++ b/drivers/ide/pdc202xx_new.c
29594@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29595 .udma_mask = udma, \
29596 }
29597
29598-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29599+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29600 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29601 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29602 };
29603diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29604index 3a35ec6..5634510 100644
29605--- a/drivers/ide/pdc202xx_old.c
29606+++ b/drivers/ide/pdc202xx_old.c
29607@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29608 .max_sectors = sectors, \
29609 }
29610
29611-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29612+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29613 { /* 0: PDC20246 */
29614 .name = DRV_NAME,
29615 .init_chipset = init_chipset_pdc202xx,
29616diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29617index 1892e81..fe0fd60 100644
29618--- a/drivers/ide/piix.c
29619+++ b/drivers/ide/piix.c
29620@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29621 .udma_mask = udma, \
29622 }
29623
29624-static const struct ide_port_info piix_pci_info[] __devinitdata = {
29625+static const struct ide_port_info piix_pci_info[] __devinitconst = {
29626 /* 0: MPIIX */
29627 { /*
29628 * MPIIX actually has only a single IDE channel mapped to
29629diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29630index a6414a8..c04173e 100644
29631--- a/drivers/ide/rz1000.c
29632+++ b/drivers/ide/rz1000.c
29633@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29634 }
29635 }
29636
29637-static const struct ide_port_info rz1000_chipset __devinitdata = {
29638+static const struct ide_port_info rz1000_chipset __devinitconst = {
29639 .name = DRV_NAME,
29640 .host_flags = IDE_HFLAG_NO_DMA,
29641 };
29642diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29643index 356b9b5..d4758eb 100644
29644--- a/drivers/ide/sc1200.c
29645+++ b/drivers/ide/sc1200.c
29646@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29647 .dma_sff_read_status = ide_dma_sff_read_status,
29648 };
29649
29650-static const struct ide_port_info sc1200_chipset __devinitdata = {
29651+static const struct ide_port_info sc1200_chipset __devinitconst = {
29652 .name = DRV_NAME,
29653 .port_ops = &sc1200_port_ops,
29654 .dma_ops = &sc1200_dma_ops,
29655diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29656index b7f5b0c..9701038 100644
29657--- a/drivers/ide/scc_pata.c
29658+++ b/drivers/ide/scc_pata.c
29659@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29660 .dma_sff_read_status = scc_dma_sff_read_status,
29661 };
29662
29663-static const struct ide_port_info scc_chipset __devinitdata = {
29664+static const struct ide_port_info scc_chipset __devinitconst = {
29665 .name = "sccIDE",
29666 .init_iops = init_iops_scc,
29667 .init_dma = scc_init_dma,
29668diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29669index 35fb8da..24d72ef 100644
29670--- a/drivers/ide/serverworks.c
29671+++ b/drivers/ide/serverworks.c
29672@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29673 .cable_detect = svwks_cable_detect,
29674 };
29675
29676-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29677+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29678 { /* 0: OSB4 */
29679 .name = DRV_NAME,
29680 .init_chipset = init_chipset_svwks,
29681diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29682index ddeda44..46f7e30 100644
29683--- a/drivers/ide/siimage.c
29684+++ b/drivers/ide/siimage.c
29685@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29686 .udma_mask = ATA_UDMA6, \
29687 }
29688
29689-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29690+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29691 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29692 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29693 };
29694diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29695index 4a00225..09e61b4 100644
29696--- a/drivers/ide/sis5513.c
29697+++ b/drivers/ide/sis5513.c
29698@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29699 .cable_detect = sis_cable_detect,
29700 };
29701
29702-static const struct ide_port_info sis5513_chipset __devinitdata = {
29703+static const struct ide_port_info sis5513_chipset __devinitconst = {
29704 .name = DRV_NAME,
29705 .init_chipset = init_chipset_sis5513,
29706 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29707diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29708index f21dc2a..d051cd2 100644
29709--- a/drivers/ide/sl82c105.c
29710+++ b/drivers/ide/sl82c105.c
29711@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29712 .dma_sff_read_status = ide_dma_sff_read_status,
29713 };
29714
29715-static const struct ide_port_info sl82c105_chipset __devinitdata = {
29716+static const struct ide_port_info sl82c105_chipset __devinitconst = {
29717 .name = DRV_NAME,
29718 .init_chipset = init_chipset_sl82c105,
29719 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29720diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29721index 864ffe0..863a5e9 100644
29722--- a/drivers/ide/slc90e66.c
29723+++ b/drivers/ide/slc90e66.c
29724@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29725 .cable_detect = slc90e66_cable_detect,
29726 };
29727
29728-static const struct ide_port_info slc90e66_chipset __devinitdata = {
29729+static const struct ide_port_info slc90e66_chipset __devinitconst = {
29730 .name = DRV_NAME,
29731 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29732 .port_ops = &slc90e66_port_ops,
29733diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29734index 4799d5c..1794678 100644
29735--- a/drivers/ide/tc86c001.c
29736+++ b/drivers/ide/tc86c001.c
29737@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29738 .dma_sff_read_status = ide_dma_sff_read_status,
29739 };
29740
29741-static const struct ide_port_info tc86c001_chipset __devinitdata = {
29742+static const struct ide_port_info tc86c001_chipset __devinitconst = {
29743 .name = DRV_NAME,
29744 .init_hwif = init_hwif_tc86c001,
29745 .port_ops = &tc86c001_port_ops,
29746diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29747index 281c914..55ce1b8 100644
29748--- a/drivers/ide/triflex.c
29749+++ b/drivers/ide/triflex.c
29750@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29751 .set_dma_mode = triflex_set_mode,
29752 };
29753
29754-static const struct ide_port_info triflex_device __devinitdata = {
29755+static const struct ide_port_info triflex_device __devinitconst = {
29756 .name = DRV_NAME,
29757 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29758 .port_ops = &triflex_port_ops,
29759diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29760index 4b42ca0..e494a98 100644
29761--- a/drivers/ide/trm290.c
29762+++ b/drivers/ide/trm290.c
29763@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29764 .dma_check = trm290_dma_check,
29765 };
29766
29767-static const struct ide_port_info trm290_chipset __devinitdata = {
29768+static const struct ide_port_info trm290_chipset __devinitconst = {
29769 .name = DRV_NAME,
29770 .init_hwif = init_hwif_trm290,
29771 .tp_ops = &trm290_tp_ops,
29772diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29773index f46f49c..eb77678 100644
29774--- a/drivers/ide/via82cxxx.c
29775+++ b/drivers/ide/via82cxxx.c
29776@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29777 .cable_detect = via82cxxx_cable_detect,
29778 };
29779
29780-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29781+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29782 .name = DRV_NAME,
29783 .init_chipset = init_chipset_via82cxxx,
29784 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29785diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
29786index eb0e2cc..14241c7 100644
29787--- a/drivers/ieee802154/fakehard.c
29788+++ b/drivers/ieee802154/fakehard.c
29789@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
29790 phy->transmit_power = 0xbf;
29791
29792 dev->netdev_ops = &fake_ops;
29793- dev->ml_priv = &fake_mlme;
29794+ dev->ml_priv = (void *)&fake_mlme;
29795
29796 priv = netdev_priv(dev);
29797 priv->phy = phy;
29798diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29799index 8b72f39..55df4c8 100644
29800--- a/drivers/infiniband/core/cm.c
29801+++ b/drivers/infiniband/core/cm.c
29802@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29803
29804 struct cm_counter_group {
29805 struct kobject obj;
29806- atomic_long_t counter[CM_ATTR_COUNT];
29807+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29808 };
29809
29810 struct cm_counter_attribute {
29811@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29812 struct ib_mad_send_buf *msg = NULL;
29813 int ret;
29814
29815- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29816+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29817 counter[CM_REQ_COUNTER]);
29818
29819 /* Quick state check to discard duplicate REQs. */
29820@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29821 if (!cm_id_priv)
29822 return;
29823
29824- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29825+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29826 counter[CM_REP_COUNTER]);
29827 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29828 if (ret)
29829@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
29830 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29831 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29832 spin_unlock_irq(&cm_id_priv->lock);
29833- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29834+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29835 counter[CM_RTU_COUNTER]);
29836 goto out;
29837 }
29838@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
29839 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29840 dreq_msg->local_comm_id);
29841 if (!cm_id_priv) {
29842- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29843+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29844 counter[CM_DREQ_COUNTER]);
29845 cm_issue_drep(work->port, work->mad_recv_wc);
29846 return -EINVAL;
29847@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
29848 case IB_CM_MRA_REP_RCVD:
29849 break;
29850 case IB_CM_TIMEWAIT:
29851- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29852+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29853 counter[CM_DREQ_COUNTER]);
29854 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29855 goto unlock;
29856@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
29857 cm_free_msg(msg);
29858 goto deref;
29859 case IB_CM_DREQ_RCVD:
29860- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29861+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29862 counter[CM_DREQ_COUNTER]);
29863 goto unlock;
29864 default:
29865@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
29866 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29867 cm_id_priv->msg, timeout)) {
29868 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29869- atomic_long_inc(&work->port->
29870+ atomic_long_inc_unchecked(&work->port->
29871 counter_group[CM_RECV_DUPLICATES].
29872 counter[CM_MRA_COUNTER]);
29873 goto out;
29874@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
29875 break;
29876 case IB_CM_MRA_REQ_RCVD:
29877 case IB_CM_MRA_REP_RCVD:
29878- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29879+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29880 counter[CM_MRA_COUNTER]);
29881 /* fall through */
29882 default:
29883@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
29884 case IB_CM_LAP_IDLE:
29885 break;
29886 case IB_CM_MRA_LAP_SENT:
29887- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29888+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29889 counter[CM_LAP_COUNTER]);
29890 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29891 goto unlock;
29892@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
29893 cm_free_msg(msg);
29894 goto deref;
29895 case IB_CM_LAP_RCVD:
29896- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29897+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29898 counter[CM_LAP_COUNTER]);
29899 goto unlock;
29900 default:
29901@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29902 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29903 if (cur_cm_id_priv) {
29904 spin_unlock_irq(&cm.lock);
29905- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29906+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29907 counter[CM_SIDR_REQ_COUNTER]);
29908 goto out; /* Duplicate message. */
29909 }
29910@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29911 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29912 msg->retries = 1;
29913
29914- atomic_long_add(1 + msg->retries,
29915+ atomic_long_add_unchecked(1 + msg->retries,
29916 &port->counter_group[CM_XMIT].counter[attr_index]);
29917 if (msg->retries)
29918- atomic_long_add(msg->retries,
29919+ atomic_long_add_unchecked(msg->retries,
29920 &port->counter_group[CM_XMIT_RETRIES].
29921 counter[attr_index]);
29922
29923@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29924 }
29925
29926 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29927- atomic_long_inc(&port->counter_group[CM_RECV].
29928+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29929 counter[attr_id - CM_ATTR_ID_OFFSET]);
29930
29931 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29932@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29933 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29934
29935 return sprintf(buf, "%ld\n",
29936- atomic_long_read(&group->counter[cm_attr->index]));
29937+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29938 }
29939
29940 static const struct sysfs_ops cm_counter_ops = {
29941diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29942index 176c8f9..2627b62 100644
29943--- a/drivers/infiniband/core/fmr_pool.c
29944+++ b/drivers/infiniband/core/fmr_pool.c
29945@@ -98,8 +98,8 @@ struct ib_fmr_pool {
29946
29947 struct task_struct *thread;
29948
29949- atomic_t req_ser;
29950- atomic_t flush_ser;
29951+ atomic_unchecked_t req_ser;
29952+ atomic_unchecked_t flush_ser;
29953
29954 wait_queue_head_t force_wait;
29955 };
29956@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29957 struct ib_fmr_pool *pool = pool_ptr;
29958
29959 do {
29960- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29961+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29962 ib_fmr_batch_release(pool);
29963
29964- atomic_inc(&pool->flush_ser);
29965+ atomic_inc_unchecked(&pool->flush_ser);
29966 wake_up_interruptible(&pool->force_wait);
29967
29968 if (pool->flush_function)
29969@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29970 }
29971
29972 set_current_state(TASK_INTERRUPTIBLE);
29973- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29974+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29975 !kthread_should_stop())
29976 schedule();
29977 __set_current_state(TASK_RUNNING);
29978@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29979 pool->dirty_watermark = params->dirty_watermark;
29980 pool->dirty_len = 0;
29981 spin_lock_init(&pool->pool_lock);
29982- atomic_set(&pool->req_ser, 0);
29983- atomic_set(&pool->flush_ser, 0);
29984+ atomic_set_unchecked(&pool->req_ser, 0);
29985+ atomic_set_unchecked(&pool->flush_ser, 0);
29986 init_waitqueue_head(&pool->force_wait);
29987
29988 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29989@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29990 }
29991 spin_unlock_irq(&pool->pool_lock);
29992
29993- serial = atomic_inc_return(&pool->req_ser);
29994+ serial = atomic_inc_return_unchecked(&pool->req_ser);
29995 wake_up_process(pool->thread);
29996
29997 if (wait_event_interruptible(pool->force_wait,
29998- atomic_read(&pool->flush_ser) - serial >= 0))
29999+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30000 return -EINTR;
30001
30002 return 0;
30003@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30004 } else {
30005 list_add_tail(&fmr->list, &pool->dirty_list);
30006 if (++pool->dirty_len >= pool->dirty_watermark) {
30007- atomic_inc(&pool->req_ser);
30008+ atomic_inc_unchecked(&pool->req_ser);
30009 wake_up_process(pool->thread);
30010 }
30011 }
30012diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30013index 40c8353..946b0e4 100644
30014--- a/drivers/infiniband/hw/cxgb4/mem.c
30015+++ b/drivers/infiniband/hw/cxgb4/mem.c
30016@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30017 int err;
30018 struct fw_ri_tpte tpt;
30019 u32 stag_idx;
30020- static atomic_t key;
30021+ static atomic_unchecked_t key;
30022
30023 if (c4iw_fatal_error(rdev))
30024 return -EIO;
30025@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30026 &rdev->resource.tpt_fifo_lock);
30027 if (!stag_idx)
30028 return -ENOMEM;
30029- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30030+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30031 }
30032 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30033 __func__, stag_state, type, pdid, stag_idx);
30034diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30035index 79b3dbc..96e5fcc 100644
30036--- a/drivers/infiniband/hw/ipath/ipath_rc.c
30037+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30038@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30039 struct ib_atomic_eth *ateth;
30040 struct ipath_ack_entry *e;
30041 u64 vaddr;
30042- atomic64_t *maddr;
30043+ atomic64_unchecked_t *maddr;
30044 u64 sdata;
30045 u32 rkey;
30046 u8 next;
30047@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30048 IB_ACCESS_REMOTE_ATOMIC)))
30049 goto nack_acc_unlck;
30050 /* Perform atomic OP and save result. */
30051- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30052+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30053 sdata = be64_to_cpu(ateth->swap_data);
30054 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30055 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30056- (u64) atomic64_add_return(sdata, maddr) - sdata :
30057+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30058 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30059 be64_to_cpu(ateth->compare_data),
30060 sdata);
30061diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30062index 1f95bba..9530f87 100644
30063--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30064+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30065@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30066 unsigned long flags;
30067 struct ib_wc wc;
30068 u64 sdata;
30069- atomic64_t *maddr;
30070+ atomic64_unchecked_t *maddr;
30071 enum ib_wc_status send_status;
30072
30073 /*
30074@@ -382,11 +382,11 @@ again:
30075 IB_ACCESS_REMOTE_ATOMIC)))
30076 goto acc_err;
30077 /* Perform atomic OP and save result. */
30078- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30079+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30080 sdata = wqe->wr.wr.atomic.compare_add;
30081 *(u64 *) sqp->s_sge.sge.vaddr =
30082 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30083- (u64) atomic64_add_return(sdata, maddr) - sdata :
30084+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30085 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30086 sdata, wqe->wr.wr.atomic.swap);
30087 goto send_comp;
30088diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30089index 5965b3d..16817fb 100644
30090--- a/drivers/infiniband/hw/nes/nes.c
30091+++ b/drivers/infiniband/hw/nes/nes.c
30092@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30093 LIST_HEAD(nes_adapter_list);
30094 static LIST_HEAD(nes_dev_list);
30095
30096-atomic_t qps_destroyed;
30097+atomic_unchecked_t qps_destroyed;
30098
30099 static unsigned int ee_flsh_adapter;
30100 static unsigned int sysfs_nonidx_addr;
30101@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30102 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30103 struct nes_adapter *nesadapter = nesdev->nesadapter;
30104
30105- atomic_inc(&qps_destroyed);
30106+ atomic_inc_unchecked(&qps_destroyed);
30107
30108 /* Free the control structures */
30109
30110diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30111index 568b4f1..5ea3eff 100644
30112--- a/drivers/infiniband/hw/nes/nes.h
30113+++ b/drivers/infiniband/hw/nes/nes.h
30114@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
30115 extern unsigned int wqm_quanta;
30116 extern struct list_head nes_adapter_list;
30117
30118-extern atomic_t cm_connects;
30119-extern atomic_t cm_accepts;
30120-extern atomic_t cm_disconnects;
30121-extern atomic_t cm_closes;
30122-extern atomic_t cm_connecteds;
30123-extern atomic_t cm_connect_reqs;
30124-extern atomic_t cm_rejects;
30125-extern atomic_t mod_qp_timouts;
30126-extern atomic_t qps_created;
30127-extern atomic_t qps_destroyed;
30128-extern atomic_t sw_qps_destroyed;
30129+extern atomic_unchecked_t cm_connects;
30130+extern atomic_unchecked_t cm_accepts;
30131+extern atomic_unchecked_t cm_disconnects;
30132+extern atomic_unchecked_t cm_closes;
30133+extern atomic_unchecked_t cm_connecteds;
30134+extern atomic_unchecked_t cm_connect_reqs;
30135+extern atomic_unchecked_t cm_rejects;
30136+extern atomic_unchecked_t mod_qp_timouts;
30137+extern atomic_unchecked_t qps_created;
30138+extern atomic_unchecked_t qps_destroyed;
30139+extern atomic_unchecked_t sw_qps_destroyed;
30140 extern u32 mh_detected;
30141 extern u32 mh_pauses_sent;
30142 extern u32 cm_packets_sent;
30143@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
30144 extern u32 cm_packets_received;
30145 extern u32 cm_packets_dropped;
30146 extern u32 cm_packets_retrans;
30147-extern atomic_t cm_listens_created;
30148-extern atomic_t cm_listens_destroyed;
30149+extern atomic_unchecked_t cm_listens_created;
30150+extern atomic_unchecked_t cm_listens_destroyed;
30151 extern u32 cm_backlog_drops;
30152-extern atomic_t cm_loopbacks;
30153-extern atomic_t cm_nodes_created;
30154-extern atomic_t cm_nodes_destroyed;
30155-extern atomic_t cm_accel_dropped_pkts;
30156-extern atomic_t cm_resets_recvd;
30157-extern atomic_t pau_qps_created;
30158-extern atomic_t pau_qps_destroyed;
30159+extern atomic_unchecked_t cm_loopbacks;
30160+extern atomic_unchecked_t cm_nodes_created;
30161+extern atomic_unchecked_t cm_nodes_destroyed;
30162+extern atomic_unchecked_t cm_accel_dropped_pkts;
30163+extern atomic_unchecked_t cm_resets_recvd;
30164+extern atomic_unchecked_t pau_qps_created;
30165+extern atomic_unchecked_t pau_qps_destroyed;
30166
30167 extern u32 int_mod_timer_init;
30168 extern u32 int_mod_cq_depth_256;
30169diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30170index 0a52d72..0642f36 100644
30171--- a/drivers/infiniband/hw/nes/nes_cm.c
30172+++ b/drivers/infiniband/hw/nes/nes_cm.c
30173@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30174 u32 cm_packets_retrans;
30175 u32 cm_packets_created;
30176 u32 cm_packets_received;
30177-atomic_t cm_listens_created;
30178-atomic_t cm_listens_destroyed;
30179+atomic_unchecked_t cm_listens_created;
30180+atomic_unchecked_t cm_listens_destroyed;
30181 u32 cm_backlog_drops;
30182-atomic_t cm_loopbacks;
30183-atomic_t cm_nodes_created;
30184-atomic_t cm_nodes_destroyed;
30185-atomic_t cm_accel_dropped_pkts;
30186-atomic_t cm_resets_recvd;
30187+atomic_unchecked_t cm_loopbacks;
30188+atomic_unchecked_t cm_nodes_created;
30189+atomic_unchecked_t cm_nodes_destroyed;
30190+atomic_unchecked_t cm_accel_dropped_pkts;
30191+atomic_unchecked_t cm_resets_recvd;
30192
30193 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
30194 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
30195@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
30196
30197 static struct nes_cm_core *g_cm_core;
30198
30199-atomic_t cm_connects;
30200-atomic_t cm_accepts;
30201-atomic_t cm_disconnects;
30202-atomic_t cm_closes;
30203-atomic_t cm_connecteds;
30204-atomic_t cm_connect_reqs;
30205-atomic_t cm_rejects;
30206+atomic_unchecked_t cm_connects;
30207+atomic_unchecked_t cm_accepts;
30208+atomic_unchecked_t cm_disconnects;
30209+atomic_unchecked_t cm_closes;
30210+atomic_unchecked_t cm_connecteds;
30211+atomic_unchecked_t cm_connect_reqs;
30212+atomic_unchecked_t cm_rejects;
30213
30214 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
30215 {
30216@@ -1271,7 +1271,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30217 kfree(listener);
30218 listener = NULL;
30219 ret = 0;
30220- atomic_inc(&cm_listens_destroyed);
30221+ atomic_inc_unchecked(&cm_listens_destroyed);
30222 } else {
30223 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30224 }
30225@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30226 cm_node->rem_mac);
30227
30228 add_hte_node(cm_core, cm_node);
30229- atomic_inc(&cm_nodes_created);
30230+ atomic_inc_unchecked(&cm_nodes_created);
30231
30232 return cm_node;
30233 }
30234@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30235 }
30236
30237 atomic_dec(&cm_core->node_cnt);
30238- atomic_inc(&cm_nodes_destroyed);
30239+ atomic_inc_unchecked(&cm_nodes_destroyed);
30240 nesqp = cm_node->nesqp;
30241 if (nesqp) {
30242 nesqp->cm_node = NULL;
30243@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30244
30245 static void drop_packet(struct sk_buff *skb)
30246 {
30247- atomic_inc(&cm_accel_dropped_pkts);
30248+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30249 dev_kfree_skb_any(skb);
30250 }
30251
30252@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30253 {
30254
30255 int reset = 0; /* whether to send reset in case of err.. */
30256- atomic_inc(&cm_resets_recvd);
30257+ atomic_inc_unchecked(&cm_resets_recvd);
30258 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30259 " refcnt=%d\n", cm_node, cm_node->state,
30260 atomic_read(&cm_node->ref_count));
30261@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30262 rem_ref_cm_node(cm_node->cm_core, cm_node);
30263 return NULL;
30264 }
30265- atomic_inc(&cm_loopbacks);
30266+ atomic_inc_unchecked(&cm_loopbacks);
30267 loopbackremotenode->loopbackpartner = cm_node;
30268 loopbackremotenode->tcp_cntxt.rcv_wscale =
30269 NES_CM_DEFAULT_RCV_WND_SCALE;
30270@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30271 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
30272 else {
30273 rem_ref_cm_node(cm_core, cm_node);
30274- atomic_inc(&cm_accel_dropped_pkts);
30275+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30276 dev_kfree_skb_any(skb);
30277 }
30278 break;
30279@@ -2880,7 +2880,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30280
30281 if ((cm_id) && (cm_id->event_handler)) {
30282 if (issue_disconn) {
30283- atomic_inc(&cm_disconnects);
30284+ atomic_inc_unchecked(&cm_disconnects);
30285 cm_event.event = IW_CM_EVENT_DISCONNECT;
30286 cm_event.status = disconn_status;
30287 cm_event.local_addr = cm_id->local_addr;
30288@@ -2902,7 +2902,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30289 }
30290
30291 if (issue_close) {
30292- atomic_inc(&cm_closes);
30293+ atomic_inc_unchecked(&cm_closes);
30294 nes_disconnect(nesqp, 1);
30295
30296 cm_id->provider_data = nesqp;
30297@@ -3038,7 +3038,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30298
30299 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30300 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30301- atomic_inc(&cm_accepts);
30302+ atomic_inc_unchecked(&cm_accepts);
30303
30304 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30305 netdev_refcnt_read(nesvnic->netdev));
30306@@ -3240,7 +3240,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30307 struct nes_cm_core *cm_core;
30308 u8 *start_buff;
30309
30310- atomic_inc(&cm_rejects);
30311+ atomic_inc_unchecked(&cm_rejects);
30312 cm_node = (struct nes_cm_node *)cm_id->provider_data;
30313 loopback = cm_node->loopbackpartner;
30314 cm_core = cm_node->cm_core;
30315@@ -3300,7 +3300,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30316 ntohl(cm_id->local_addr.sin_addr.s_addr),
30317 ntohs(cm_id->local_addr.sin_port));
30318
30319- atomic_inc(&cm_connects);
30320+ atomic_inc_unchecked(&cm_connects);
30321 nesqp->active_conn = 1;
30322
30323 /* cache the cm_id in the qp */
30324@@ -3406,7 +3406,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30325 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30326 return err;
30327 }
30328- atomic_inc(&cm_listens_created);
30329+ atomic_inc_unchecked(&cm_listens_created);
30330 }
30331
30332 cm_id->add_ref(cm_id);
30333@@ -3507,7 +3507,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30334
30335 if (nesqp->destroyed)
30336 return;
30337- atomic_inc(&cm_connecteds);
30338+ atomic_inc_unchecked(&cm_connecteds);
30339 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30340 " local port 0x%04X. jiffies = %lu.\n",
30341 nesqp->hwqp.qp_id,
30342@@ -3694,7 +3694,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30343
30344 cm_id->add_ref(cm_id);
30345 ret = cm_id->event_handler(cm_id, &cm_event);
30346- atomic_inc(&cm_closes);
30347+ atomic_inc_unchecked(&cm_closes);
30348 cm_event.event = IW_CM_EVENT_CLOSE;
30349 cm_event.status = 0;
30350 cm_event.provider_data = cm_id->provider_data;
30351@@ -3730,7 +3730,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30352 return;
30353 cm_id = cm_node->cm_id;
30354
30355- atomic_inc(&cm_connect_reqs);
30356+ atomic_inc_unchecked(&cm_connect_reqs);
30357 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30358 cm_node, cm_id, jiffies);
30359
30360@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30361 return;
30362 cm_id = cm_node->cm_id;
30363
30364- atomic_inc(&cm_connect_reqs);
30365+ atomic_inc_unchecked(&cm_connect_reqs);
30366 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30367 cm_node, cm_id, jiffies);
30368
30369diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
30370index b3b2a24..7bfaf1e 100644
30371--- a/drivers/infiniband/hw/nes/nes_mgt.c
30372+++ b/drivers/infiniband/hw/nes/nes_mgt.c
30373@@ -40,8 +40,8 @@
30374 #include "nes.h"
30375 #include "nes_mgt.h"
30376
30377-atomic_t pau_qps_created;
30378-atomic_t pau_qps_destroyed;
30379+atomic_unchecked_t pau_qps_created;
30380+atomic_unchecked_t pau_qps_destroyed;
30381
30382 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
30383 {
30384@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
30385 {
30386 struct sk_buff *skb;
30387 unsigned long flags;
30388- atomic_inc(&pau_qps_destroyed);
30389+ atomic_inc_unchecked(&pau_qps_destroyed);
30390
30391 /* Free packets that have not yet been forwarded */
30392 /* Lock is acquired by skb_dequeue when removing the skb */
30393@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
30394 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
30395 skb_queue_head_init(&nesqp->pau_list);
30396 spin_lock_init(&nesqp->pau_lock);
30397- atomic_inc(&pau_qps_created);
30398+ atomic_inc_unchecked(&pau_qps_created);
30399 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
30400 }
30401
30402diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30403index c00d2f3..8834298 100644
30404--- a/drivers/infiniband/hw/nes/nes_nic.c
30405+++ b/drivers/infiniband/hw/nes/nes_nic.c
30406@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30407 target_stat_values[++index] = mh_detected;
30408 target_stat_values[++index] = mh_pauses_sent;
30409 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30410- target_stat_values[++index] = atomic_read(&cm_connects);
30411- target_stat_values[++index] = atomic_read(&cm_accepts);
30412- target_stat_values[++index] = atomic_read(&cm_disconnects);
30413- target_stat_values[++index] = atomic_read(&cm_connecteds);
30414- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30415- target_stat_values[++index] = atomic_read(&cm_rejects);
30416- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30417- target_stat_values[++index] = atomic_read(&qps_created);
30418- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30419- target_stat_values[++index] = atomic_read(&qps_destroyed);
30420- target_stat_values[++index] = atomic_read(&cm_closes);
30421+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30422+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30423+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30424+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30425+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30426+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30427+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30428+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30429+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30430+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30431+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30432 target_stat_values[++index] = cm_packets_sent;
30433 target_stat_values[++index] = cm_packets_bounced;
30434 target_stat_values[++index] = cm_packets_created;
30435 target_stat_values[++index] = cm_packets_received;
30436 target_stat_values[++index] = cm_packets_dropped;
30437 target_stat_values[++index] = cm_packets_retrans;
30438- target_stat_values[++index] = atomic_read(&cm_listens_created);
30439- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30440+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30441+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30442 target_stat_values[++index] = cm_backlog_drops;
30443- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30444- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30445- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30446- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30447- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30448+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30449+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30450+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30451+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30452+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30453 target_stat_values[++index] = nesadapter->free_4kpbl;
30454 target_stat_values[++index] = nesadapter->free_256pbl;
30455 target_stat_values[++index] = int_mod_timer_init;
30456 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
30457 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
30458 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
30459- target_stat_values[++index] = atomic_read(&pau_qps_created);
30460- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
30461+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
30462+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
30463 }
30464
30465 /**
30466diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30467index 5095bc4..41e8fff 100644
30468--- a/drivers/infiniband/hw/nes/nes_verbs.c
30469+++ b/drivers/infiniband/hw/nes/nes_verbs.c
30470@@ -46,9 +46,9 @@
30471
30472 #include <rdma/ib_umem.h>
30473
30474-atomic_t mod_qp_timouts;
30475-atomic_t qps_created;
30476-atomic_t sw_qps_destroyed;
30477+atomic_unchecked_t mod_qp_timouts;
30478+atomic_unchecked_t qps_created;
30479+atomic_unchecked_t sw_qps_destroyed;
30480
30481 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30482
30483@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30484 if (init_attr->create_flags)
30485 return ERR_PTR(-EINVAL);
30486
30487- atomic_inc(&qps_created);
30488+ atomic_inc_unchecked(&qps_created);
30489 switch (init_attr->qp_type) {
30490 case IB_QPT_RC:
30491 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30492@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30493 struct iw_cm_event cm_event;
30494 int ret = 0;
30495
30496- atomic_inc(&sw_qps_destroyed);
30497+ atomic_inc_unchecked(&sw_qps_destroyed);
30498 nesqp->destroyed = 1;
30499
30500 /* Blow away the connection if it exists. */
30501diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30502index b881bdc..c2e360c 100644
30503--- a/drivers/infiniband/hw/qib/qib.h
30504+++ b/drivers/infiniband/hw/qib/qib.h
30505@@ -51,6 +51,7 @@
30506 #include <linux/completion.h>
30507 #include <linux/kref.h>
30508 #include <linux/sched.h>
30509+#include <linux/slab.h>
30510
30511 #include "qib_common.h"
30512 #include "qib_verbs.h"
30513diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30514index c351aa4..e6967c2 100644
30515--- a/drivers/input/gameport/gameport.c
30516+++ b/drivers/input/gameport/gameport.c
30517@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30518 */
30519 static void gameport_init_port(struct gameport *gameport)
30520 {
30521- static atomic_t gameport_no = ATOMIC_INIT(0);
30522+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30523
30524 __module_get(THIS_MODULE);
30525
30526 mutex_init(&gameport->drv_mutex);
30527 device_initialize(&gameport->dev);
30528 dev_set_name(&gameport->dev, "gameport%lu",
30529- (unsigned long)atomic_inc_return(&gameport_no) - 1);
30530+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30531 gameport->dev.bus = &gameport_bus;
30532 gameport->dev.release = gameport_release_port;
30533 if (gameport->parent)
30534diff --git a/drivers/input/input.c b/drivers/input/input.c
30535index da38d97..2aa0b79 100644
30536--- a/drivers/input/input.c
30537+++ b/drivers/input/input.c
30538@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30539 */
30540 int input_register_device(struct input_dev *dev)
30541 {
30542- static atomic_t input_no = ATOMIC_INIT(0);
30543+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30544 struct input_handler *handler;
30545 const char *path;
30546 int error;
30547@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30548 dev->setkeycode = input_default_setkeycode;
30549
30550 dev_set_name(&dev->dev, "input%ld",
30551- (unsigned long) atomic_inc_return(&input_no) - 1);
30552+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30553
30554 error = device_add(&dev->dev);
30555 if (error)
30556diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30557index b8d8611..7a4a04b 100644
30558--- a/drivers/input/joystick/sidewinder.c
30559+++ b/drivers/input/joystick/sidewinder.c
30560@@ -30,6 +30,7 @@
30561 #include <linux/kernel.h>
30562 #include <linux/module.h>
30563 #include <linux/slab.h>
30564+#include <linux/sched.h>
30565 #include <linux/init.h>
30566 #include <linux/input.h>
30567 #include <linux/gameport.h>
30568diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30569index d728875..844c89b 100644
30570--- a/drivers/input/joystick/xpad.c
30571+++ b/drivers/input/joystick/xpad.c
30572@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30573
30574 static int xpad_led_probe(struct usb_xpad *xpad)
30575 {
30576- static atomic_t led_seq = ATOMIC_INIT(0);
30577+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30578 long led_no;
30579 struct xpad_led *led;
30580 struct led_classdev *led_cdev;
30581@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30582 if (!led)
30583 return -ENOMEM;
30584
30585- led_no = (long)atomic_inc_return(&led_seq) - 1;
30586+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30587
30588 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30589 led->xpad = xpad;
30590diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30591index 0110b5a..d3ad144 100644
30592--- a/drivers/input/mousedev.c
30593+++ b/drivers/input/mousedev.c
30594@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30595
30596 spin_unlock_irq(&client->packet_lock);
30597
30598- if (copy_to_user(buffer, data, count))
30599+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
30600 return -EFAULT;
30601
30602 return count;
30603diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30604index ba70058..571d25d 100644
30605--- a/drivers/input/serio/serio.c
30606+++ b/drivers/input/serio/serio.c
30607@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30608 */
30609 static void serio_init_port(struct serio *serio)
30610 {
30611- static atomic_t serio_no = ATOMIC_INIT(0);
30612+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30613
30614 __module_get(THIS_MODULE);
30615
30616@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30617 mutex_init(&serio->drv_mutex);
30618 device_initialize(&serio->dev);
30619 dev_set_name(&serio->dev, "serio%ld",
30620- (long)atomic_inc_return(&serio_no) - 1);
30621+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30622 serio->dev.bus = &serio_bus;
30623 serio->dev.release = serio_release_port;
30624 serio->dev.groups = serio_device_attr_groups;
30625diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30626index e44933d..9ba484a 100644
30627--- a/drivers/isdn/capi/capi.c
30628+++ b/drivers/isdn/capi/capi.c
30629@@ -83,8 +83,8 @@ struct capiminor {
30630
30631 struct capi20_appl *ap;
30632 u32 ncci;
30633- atomic_t datahandle;
30634- atomic_t msgid;
30635+ atomic_unchecked_t datahandle;
30636+ atomic_unchecked_t msgid;
30637
30638 struct tty_port port;
30639 int ttyinstop;
30640@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30641 capimsg_setu16(s, 2, mp->ap->applid);
30642 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30643 capimsg_setu8 (s, 5, CAPI_RESP);
30644- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30645+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30646 capimsg_setu32(s, 8, mp->ncci);
30647 capimsg_setu16(s, 12, datahandle);
30648 }
30649@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30650 mp->outbytes -= len;
30651 spin_unlock_bh(&mp->outlock);
30652
30653- datahandle = atomic_inc_return(&mp->datahandle);
30654+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30655 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30656 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30657 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30658 capimsg_setu16(skb->data, 2, mp->ap->applid);
30659 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30660 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30661- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30662+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30663 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30664 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30665 capimsg_setu16(skb->data, 16, len); /* Data length */
30666diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30667index db621db..825ea1a 100644
30668--- a/drivers/isdn/gigaset/common.c
30669+++ b/drivers/isdn/gigaset/common.c
30670@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30671 cs->commands_pending = 0;
30672 cs->cur_at_seq = 0;
30673 cs->gotfwver = -1;
30674- cs->open_count = 0;
30675+ local_set(&cs->open_count, 0);
30676 cs->dev = NULL;
30677 cs->tty = NULL;
30678 cs->tty_dev = NULL;
30679diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30680index 212efaf..f187c6b 100644
30681--- a/drivers/isdn/gigaset/gigaset.h
30682+++ b/drivers/isdn/gigaset/gigaset.h
30683@@ -35,6 +35,7 @@
30684 #include <linux/tty_driver.h>
30685 #include <linux/list.h>
30686 #include <linux/atomic.h>
30687+#include <asm/local.h>
30688
30689 #define GIG_VERSION {0, 5, 0, 0}
30690 #define GIG_COMPAT {0, 4, 0, 0}
30691@@ -433,7 +434,7 @@ struct cardstate {
30692 spinlock_t cmdlock;
30693 unsigned curlen, cmdbytes;
30694
30695- unsigned open_count;
30696+ local_t open_count;
30697 struct tty_struct *tty;
30698 struct tasklet_struct if_wake_tasklet;
30699 unsigned control_state;
30700diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30701index ee0a549..a7c9798 100644
30702--- a/drivers/isdn/gigaset/interface.c
30703+++ b/drivers/isdn/gigaset/interface.c
30704@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30705 }
30706 tty->driver_data = cs;
30707
30708- ++cs->open_count;
30709-
30710- if (cs->open_count == 1) {
30711+ if (local_inc_return(&cs->open_count) == 1) {
30712 spin_lock_irqsave(&cs->lock, flags);
30713 cs->tty = tty;
30714 spin_unlock_irqrestore(&cs->lock, flags);
30715@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30716
30717 if (!cs->connected)
30718 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30719- else if (!cs->open_count)
30720+ else if (!local_read(&cs->open_count))
30721 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30722 else {
30723- if (!--cs->open_count) {
30724+ if (!local_dec_return(&cs->open_count)) {
30725 spin_lock_irqsave(&cs->lock, flags);
30726 cs->tty = NULL;
30727 spin_unlock_irqrestore(&cs->lock, flags);
30728@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
30729 if (!cs->connected) {
30730 gig_dbg(DEBUG_IF, "not connected");
30731 retval = -ENODEV;
30732- } else if (!cs->open_count)
30733+ } else if (!local_read(&cs->open_count))
30734 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30735 else {
30736 retval = 0;
30737@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30738 retval = -ENODEV;
30739 goto done;
30740 }
30741- if (!cs->open_count) {
30742+ if (!local_read(&cs->open_count)) {
30743 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30744 retval = -ENODEV;
30745 goto done;
30746@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
30747 if (!cs->connected) {
30748 gig_dbg(DEBUG_IF, "not connected");
30749 retval = -ENODEV;
30750- } else if (!cs->open_count)
30751+ } else if (!local_read(&cs->open_count))
30752 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30753 else if (cs->mstate != MS_LOCKED) {
30754 dev_warn(cs->dev, "can't write to unlocked device\n");
30755@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30756
30757 if (!cs->connected)
30758 gig_dbg(DEBUG_IF, "not connected");
30759- else if (!cs->open_count)
30760+ else if (!local_read(&cs->open_count))
30761 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30762 else if (cs->mstate != MS_LOCKED)
30763 dev_warn(cs->dev, "can't write to unlocked device\n");
30764@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
30765
30766 if (!cs->connected)
30767 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30768- else if (!cs->open_count)
30769+ else if (!local_read(&cs->open_count))
30770 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30771 else
30772 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30773@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
30774
30775 if (!cs->connected)
30776 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30777- else if (!cs->open_count)
30778+ else if (!local_read(&cs->open_count))
30779 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30780 else
30781 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30782@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30783 goto out;
30784 }
30785
30786- if (!cs->open_count) {
30787+ if (!local_read(&cs->open_count)) {
30788 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30789 goto out;
30790 }
30791diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30792index 2a57da59..e7a12ed 100644
30793--- a/drivers/isdn/hardware/avm/b1.c
30794+++ b/drivers/isdn/hardware/avm/b1.c
30795@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30796 }
30797 if (left) {
30798 if (t4file->user) {
30799- if (copy_from_user(buf, dp, left))
30800+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30801 return -EFAULT;
30802 } else {
30803 memcpy(buf, dp, left);
30804@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30805 }
30806 if (left) {
30807 if (config->user) {
30808- if (copy_from_user(buf, dp, left))
30809+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30810 return -EFAULT;
30811 } else {
30812 memcpy(buf, dp, left);
30813diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30814index 85784a7..a19ca98 100644
30815--- a/drivers/isdn/hardware/eicon/divasync.h
30816+++ b/drivers/isdn/hardware/eicon/divasync.h
30817@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30818 } diva_didd_add_adapter_t;
30819 typedef struct _diva_didd_remove_adapter {
30820 IDI_CALL p_request;
30821-} diva_didd_remove_adapter_t;
30822+} __no_const diva_didd_remove_adapter_t;
30823 typedef struct _diva_didd_read_adapter_array {
30824 void * buffer;
30825 dword length;
30826diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30827index a3bd163..8956575 100644
30828--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30829+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30830@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30831 typedef struct _diva_os_idi_adapter_interface {
30832 diva_init_card_proc_t cleanup_adapter_proc;
30833 diva_cmd_card_proc_t cmd_proc;
30834-} diva_os_idi_adapter_interface_t;
30835+} __no_const diva_os_idi_adapter_interface_t;
30836
30837 typedef struct _diva_os_xdi_adapter {
30838 struct list_head link;
30839diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30840index 1f355bb..43f1fea 100644
30841--- a/drivers/isdn/icn/icn.c
30842+++ b/drivers/isdn/icn/icn.c
30843@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30844 if (count > len)
30845 count = len;
30846 if (user) {
30847- if (copy_from_user(msg, buf, count))
30848+ if (count > sizeof msg || copy_from_user(msg, buf, count))
30849 return -EFAULT;
30850 } else
30851 memcpy(msg, buf, count);
30852diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30853index b5fdcb7..5b6c59f 100644
30854--- a/drivers/lguest/core.c
30855+++ b/drivers/lguest/core.c
30856@@ -92,9 +92,17 @@ static __init int map_switcher(void)
30857 * it's worked so far. The end address needs +1 because __get_vm_area
30858 * allocates an extra guard page, so we need space for that.
30859 */
30860+
30861+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30862+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30863+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30864+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30865+#else
30866 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30867 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30868 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30869+#endif
30870+
30871 if (!switcher_vma) {
30872 err = -ENOMEM;
30873 printk("lguest: could not map switcher pages high\n");
30874@@ -119,7 +127,7 @@ static __init int map_switcher(void)
30875 * Now the Switcher is mapped at the right address, we can't fail!
30876 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30877 */
30878- memcpy(switcher_vma->addr, start_switcher_text,
30879+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30880 end_switcher_text - start_switcher_text);
30881
30882 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30883diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30884index 65af42f..530c87a 100644
30885--- a/drivers/lguest/x86/core.c
30886+++ b/drivers/lguest/x86/core.c
30887@@ -59,7 +59,7 @@ static struct {
30888 /* Offset from where switcher.S was compiled to where we've copied it */
30889 static unsigned long switcher_offset(void)
30890 {
30891- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30892+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30893 }
30894
30895 /* This cpu's struct lguest_pages. */
30896@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30897 * These copies are pretty cheap, so we do them unconditionally: */
30898 /* Save the current Host top-level page directory.
30899 */
30900+
30901+#ifdef CONFIG_PAX_PER_CPU_PGD
30902+ pages->state.host_cr3 = read_cr3();
30903+#else
30904 pages->state.host_cr3 = __pa(current->mm->pgd);
30905+#endif
30906+
30907 /*
30908 * Set up the Guest's page tables to see this CPU's pages (and no
30909 * other CPU's pages).
30910@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30911 * compiled-in switcher code and the high-mapped copy we just made.
30912 */
30913 for (i = 0; i < IDT_ENTRIES; i++)
30914- default_idt_entries[i] += switcher_offset();
30915+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30916
30917 /*
30918 * Set up the Switcher's per-cpu areas.
30919@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30920 * it will be undisturbed when we switch. To change %cs and jump we
30921 * need this structure to feed to Intel's "lcall" instruction.
30922 */
30923- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30924+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30925 lguest_entry.segment = LGUEST_CS;
30926
30927 /*
30928diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
30929index 40634b0..4f5855e 100644
30930--- a/drivers/lguest/x86/switcher_32.S
30931+++ b/drivers/lguest/x86/switcher_32.S
30932@@ -87,6 +87,7 @@
30933 #include <asm/page.h>
30934 #include <asm/segment.h>
30935 #include <asm/lguest.h>
30936+#include <asm/processor-flags.h>
30937
30938 // We mark the start of the code to copy
30939 // It's placed in .text tho it's never run here
30940@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30941 // Changes type when we load it: damn Intel!
30942 // For after we switch over our page tables
30943 // That entry will be read-only: we'd crash.
30944+
30945+#ifdef CONFIG_PAX_KERNEXEC
30946+ mov %cr0, %edx
30947+ xor $X86_CR0_WP, %edx
30948+ mov %edx, %cr0
30949+#endif
30950+
30951 movl $(GDT_ENTRY_TSS*8), %edx
30952 ltr %dx
30953
30954@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30955 // Let's clear it again for our return.
30956 // The GDT descriptor of the Host
30957 // Points to the table after two "size" bytes
30958- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30959+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30960 // Clear "used" from type field (byte 5, bit 2)
30961- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30962+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30963+
30964+#ifdef CONFIG_PAX_KERNEXEC
30965+ mov %cr0, %eax
30966+ xor $X86_CR0_WP, %eax
30967+ mov %eax, %cr0
30968+#endif
30969
30970 // Once our page table's switched, the Guest is live!
30971 // The Host fades as we run this final step.
30972@@ -295,13 +309,12 @@ deliver_to_host:
30973 // I consulted gcc, and it gave
30974 // These instructions, which I gladly credit:
30975 leal (%edx,%ebx,8), %eax
30976- movzwl (%eax),%edx
30977- movl 4(%eax), %eax
30978- xorw %ax, %ax
30979- orl %eax, %edx
30980+ movl 4(%eax), %edx
30981+ movw (%eax), %dx
30982 // Now the address of the handler's in %edx
30983 // We call it now: its "iret" drops us home.
30984- jmp *%edx
30985+ ljmp $__KERNEL_CS, $1f
30986+1: jmp *%edx
30987
30988 // Every interrupt can come to us here
30989 // But we must truly tell each apart.
30990diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
30991index 4daf9e5..b8d1d0f 100644
30992--- a/drivers/macintosh/macio_asic.c
30993+++ b/drivers/macintosh/macio_asic.c
30994@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
30995 * MacIO is matched against any Apple ID, it's probe() function
30996 * will then decide wether it applies or not
30997 */
30998-static const struct pci_device_id __devinitdata pci_ids [] = { {
30999+static const struct pci_device_id __devinitconst pci_ids [] = { {
31000 .vendor = PCI_VENDOR_ID_APPLE,
31001 .device = PCI_ANY_ID,
31002 .subvendor = PCI_ANY_ID,
31003diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31004index 31c2dc2..a2de7a6 100644
31005--- a/drivers/md/dm-ioctl.c
31006+++ b/drivers/md/dm-ioctl.c
31007@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31008 cmd == DM_LIST_VERSIONS_CMD)
31009 return 0;
31010
31011- if ((cmd == DM_DEV_CREATE_CMD)) {
31012+ if (cmd == DM_DEV_CREATE_CMD) {
31013 if (!*param->name) {
31014 DMWARN("name not supplied when creating device");
31015 return -EINVAL;
31016diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31017index 9bfd057..01180bc 100644
31018--- a/drivers/md/dm-raid1.c
31019+++ b/drivers/md/dm-raid1.c
31020@@ -40,7 +40,7 @@ enum dm_raid1_error {
31021
31022 struct mirror {
31023 struct mirror_set *ms;
31024- atomic_t error_count;
31025+ atomic_unchecked_t error_count;
31026 unsigned long error_type;
31027 struct dm_dev *dev;
31028 sector_t offset;
31029@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31030 struct mirror *m;
31031
31032 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31033- if (!atomic_read(&m->error_count))
31034+ if (!atomic_read_unchecked(&m->error_count))
31035 return m;
31036
31037 return NULL;
31038@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31039 * simple way to tell if a device has encountered
31040 * errors.
31041 */
31042- atomic_inc(&m->error_count);
31043+ atomic_inc_unchecked(&m->error_count);
31044
31045 if (test_and_set_bit(error_type, &m->error_type))
31046 return;
31047@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31048 struct mirror *m = get_default_mirror(ms);
31049
31050 do {
31051- if (likely(!atomic_read(&m->error_count)))
31052+ if (likely(!atomic_read_unchecked(&m->error_count)))
31053 return m;
31054
31055 if (m-- == ms->mirror)
31056@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31057 {
31058 struct mirror *default_mirror = get_default_mirror(m->ms);
31059
31060- return !atomic_read(&default_mirror->error_count);
31061+ return !atomic_read_unchecked(&default_mirror->error_count);
31062 }
31063
31064 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31065@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31066 */
31067 if (likely(region_in_sync(ms, region, 1)))
31068 m = choose_mirror(ms, bio->bi_sector);
31069- else if (m && atomic_read(&m->error_count))
31070+ else if (m && atomic_read_unchecked(&m->error_count))
31071 m = NULL;
31072
31073 if (likely(m))
31074@@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31075 }
31076
31077 ms->mirror[mirror].ms = ms;
31078- atomic_set(&(ms->mirror[mirror].error_count), 0);
31079+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31080 ms->mirror[mirror].error_type = 0;
31081 ms->mirror[mirror].offset = offset;
31082
31083@@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31084 */
31085 static char device_status_char(struct mirror *m)
31086 {
31087- if (!atomic_read(&(m->error_count)))
31088+ if (!atomic_read_unchecked(&(m->error_count)))
31089 return 'A';
31090
31091 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31092diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31093index 3d80cf0..b77cc47 100644
31094--- a/drivers/md/dm-stripe.c
31095+++ b/drivers/md/dm-stripe.c
31096@@ -20,7 +20,7 @@ struct stripe {
31097 struct dm_dev *dev;
31098 sector_t physical_start;
31099
31100- atomic_t error_count;
31101+ atomic_unchecked_t error_count;
31102 };
31103
31104 struct stripe_c {
31105@@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31106 kfree(sc);
31107 return r;
31108 }
31109- atomic_set(&(sc->stripe[i].error_count), 0);
31110+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31111 }
31112
31113 ti->private = sc;
31114@@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31115 DMEMIT("%d ", sc->stripes);
31116 for (i = 0; i < sc->stripes; i++) {
31117 DMEMIT("%s ", sc->stripe[i].dev->name);
31118- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31119+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31120 'D' : 'A';
31121 }
31122 buffer[i] = '\0';
31123@@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31124 */
31125 for (i = 0; i < sc->stripes; i++)
31126 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31127- atomic_inc(&(sc->stripe[i].error_count));
31128- if (atomic_read(&(sc->stripe[i].error_count)) <
31129+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31130+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31131 DM_IO_ERROR_THRESHOLD)
31132 schedule_work(&sc->trigger_event);
31133 }
31134diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31135index 8e91321..fd17aef 100644
31136--- a/drivers/md/dm-table.c
31137+++ b/drivers/md/dm-table.c
31138@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31139 if (!dev_size)
31140 return 0;
31141
31142- if ((start >= dev_size) || (start + len > dev_size)) {
31143+ if ((start >= dev_size) || (len > dev_size - start)) {
31144 DMWARN("%s: %s too small for target: "
31145 "start=%llu, len=%llu, dev_size=%llu",
31146 dm_device_name(ti->table->md), bdevname(bdev, b),
31147diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
31148index 59c4f04..4c7b661 100644
31149--- a/drivers/md/dm-thin-metadata.c
31150+++ b/drivers/md/dm-thin-metadata.c
31151@@ -431,7 +431,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31152
31153 pmd->info.tm = tm;
31154 pmd->info.levels = 2;
31155- pmd->info.value_type.context = pmd->data_sm;
31156+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31157 pmd->info.value_type.size = sizeof(__le64);
31158 pmd->info.value_type.inc = data_block_inc;
31159 pmd->info.value_type.dec = data_block_dec;
31160@@ -450,7 +450,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
31161
31162 pmd->bl_info.tm = tm;
31163 pmd->bl_info.levels = 1;
31164- pmd->bl_info.value_type.context = pmd->data_sm;
31165+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
31166 pmd->bl_info.value_type.size = sizeof(__le64);
31167 pmd->bl_info.value_type.inc = data_block_inc;
31168 pmd->bl_info.value_type.dec = data_block_dec;
31169diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31170index 4720f68..78d1df7 100644
31171--- a/drivers/md/dm.c
31172+++ b/drivers/md/dm.c
31173@@ -177,9 +177,9 @@ struct mapped_device {
31174 /*
31175 * Event handling.
31176 */
31177- atomic_t event_nr;
31178+ atomic_unchecked_t event_nr;
31179 wait_queue_head_t eventq;
31180- atomic_t uevent_seq;
31181+ atomic_unchecked_t uevent_seq;
31182 struct list_head uevent_list;
31183 spinlock_t uevent_lock; /* Protect access to uevent_list */
31184
31185@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
31186 rwlock_init(&md->map_lock);
31187 atomic_set(&md->holders, 1);
31188 atomic_set(&md->open_count, 0);
31189- atomic_set(&md->event_nr, 0);
31190- atomic_set(&md->uevent_seq, 0);
31191+ atomic_set_unchecked(&md->event_nr, 0);
31192+ atomic_set_unchecked(&md->uevent_seq, 0);
31193 INIT_LIST_HEAD(&md->uevent_list);
31194 spin_lock_init(&md->uevent_lock);
31195
31196@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
31197
31198 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31199
31200- atomic_inc(&md->event_nr);
31201+ atomic_inc_unchecked(&md->event_nr);
31202 wake_up(&md->eventq);
31203 }
31204
31205@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31206
31207 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31208 {
31209- return atomic_add_return(1, &md->uevent_seq);
31210+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31211 }
31212
31213 uint32_t dm_get_event_nr(struct mapped_device *md)
31214 {
31215- return atomic_read(&md->event_nr);
31216+ return atomic_read_unchecked(&md->event_nr);
31217 }
31218
31219 int dm_wait_event(struct mapped_device *md, int event_nr)
31220 {
31221 return wait_event_interruptible(md->eventq,
31222- (event_nr != atomic_read(&md->event_nr)));
31223+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31224 }
31225
31226 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31227diff --git a/drivers/md/md.c b/drivers/md/md.c
31228index f47f1f8..b7f559e 100644
31229--- a/drivers/md/md.c
31230+++ b/drivers/md/md.c
31231@@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31232 * start build, activate spare
31233 */
31234 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31235-static atomic_t md_event_count;
31236+static atomic_unchecked_t md_event_count;
31237 void md_new_event(struct mddev *mddev)
31238 {
31239- atomic_inc(&md_event_count);
31240+ atomic_inc_unchecked(&md_event_count);
31241 wake_up(&md_event_waiters);
31242 }
31243 EXPORT_SYMBOL_GPL(md_new_event);
31244@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31245 */
31246 static void md_new_event_inintr(struct mddev *mddev)
31247 {
31248- atomic_inc(&md_event_count);
31249+ atomic_inc_unchecked(&md_event_count);
31250 wake_up(&md_event_waiters);
31251 }
31252
31253@@ -1525,7 +1525,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
31254
31255 rdev->preferred_minor = 0xffff;
31256 rdev->data_offset = le64_to_cpu(sb->data_offset);
31257- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31258+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31259
31260 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31261 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31262@@ -1742,7 +1742,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
31263 else
31264 sb->resync_offset = cpu_to_le64(0);
31265
31266- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31267+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31268
31269 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31270 sb->size = cpu_to_le64(mddev->dev_sectors);
31271@@ -2639,7 +2639,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31272 static ssize_t
31273 errors_show(struct md_rdev *rdev, char *page)
31274 {
31275- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31276+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31277 }
31278
31279 static ssize_t
31280@@ -2648,7 +2648,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
31281 char *e;
31282 unsigned long n = simple_strtoul(buf, &e, 10);
31283 if (*buf && (*e == 0 || *e == '\n')) {
31284- atomic_set(&rdev->corrected_errors, n);
31285+ atomic_set_unchecked(&rdev->corrected_errors, n);
31286 return len;
31287 }
31288 return -EINVAL;
31289@@ -3039,8 +3039,8 @@ int md_rdev_init(struct md_rdev *rdev)
31290 rdev->sb_loaded = 0;
31291 rdev->bb_page = NULL;
31292 atomic_set(&rdev->nr_pending, 0);
31293- atomic_set(&rdev->read_errors, 0);
31294- atomic_set(&rdev->corrected_errors, 0);
31295+ atomic_set_unchecked(&rdev->read_errors, 0);
31296+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31297
31298 INIT_LIST_HEAD(&rdev->same_set);
31299 init_waitqueue_head(&rdev->blocked_wait);
31300@@ -6683,7 +6683,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31301
31302 spin_unlock(&pers_lock);
31303 seq_printf(seq, "\n");
31304- seq->poll_event = atomic_read(&md_event_count);
31305+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31306 return 0;
31307 }
31308 if (v == (void*)2) {
31309@@ -6772,7 +6772,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31310 chunk_kb ? "KB" : "B");
31311 if (bitmap->file) {
31312 seq_printf(seq, ", file: ");
31313- seq_path(seq, &bitmap->file->f_path, " \t\n");
31314+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31315 }
31316
31317 seq_printf(seq, "\n");
31318@@ -6803,7 +6803,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31319 return error;
31320
31321 seq = file->private_data;
31322- seq->poll_event = atomic_read(&md_event_count);
31323+ seq->poll_event = atomic_read_unchecked(&md_event_count);
31324 return error;
31325 }
31326
31327@@ -6817,7 +6817,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31328 /* always allow read */
31329 mask = POLLIN | POLLRDNORM;
31330
31331- if (seq->poll_event != atomic_read(&md_event_count))
31332+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31333 mask |= POLLERR | POLLPRI;
31334 return mask;
31335 }
31336@@ -6861,7 +6861,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
31337 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31338 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31339 (int)part_stat_read(&disk->part0, sectors[1]) -
31340- atomic_read(&disk->sync_io);
31341+ atomic_read_unchecked(&disk->sync_io);
31342 /* sync IO will cause sync_io to increase before the disk_stats
31343 * as sync_io is counted when a request starts, and
31344 * disk_stats is counted when it completes.
31345diff --git a/drivers/md/md.h b/drivers/md/md.h
31346index cf742d9..7c7c745 100644
31347--- a/drivers/md/md.h
31348+++ b/drivers/md/md.h
31349@@ -120,13 +120,13 @@ struct md_rdev {
31350 * only maintained for arrays that
31351 * support hot removal
31352 */
31353- atomic_t read_errors; /* number of consecutive read errors that
31354+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31355 * we have tried to ignore.
31356 */
31357 struct timespec last_read_error; /* monotonic time since our
31358 * last read error
31359 */
31360- atomic_t corrected_errors; /* number of corrected read errors,
31361+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31362 * for reporting to userspace and storing
31363 * in superblock.
31364 */
31365@@ -410,7 +410,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
31366
31367 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31368 {
31369- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31370+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31371 }
31372
31373 struct md_personality
31374diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
31375index 50ed53b..4f29d7d 100644
31376--- a/drivers/md/persistent-data/dm-space-map-checker.c
31377+++ b/drivers/md/persistent-data/dm-space-map-checker.c
31378@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
31379 /*----------------------------------------------------------------*/
31380
31381 struct sm_checker {
31382- struct dm_space_map sm;
31383+ dm_space_map_no_const sm;
31384
31385 struct count_array old_counts;
31386 struct count_array counts;
31387diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
31388index fc469ba..2d91555 100644
31389--- a/drivers/md/persistent-data/dm-space-map-disk.c
31390+++ b/drivers/md/persistent-data/dm-space-map-disk.c
31391@@ -23,7 +23,7 @@
31392 * Space map interface.
31393 */
31394 struct sm_disk {
31395- struct dm_space_map sm;
31396+ dm_space_map_no_const sm;
31397
31398 struct ll_disk ll;
31399 struct ll_disk old_ll;
31400diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
31401index e89ae5e..062e4c2 100644
31402--- a/drivers/md/persistent-data/dm-space-map-metadata.c
31403+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
31404@@ -43,7 +43,7 @@ struct block_op {
31405 };
31406
31407 struct sm_metadata {
31408- struct dm_space_map sm;
31409+ dm_space_map_no_const sm;
31410
31411 struct ll_disk ll;
31412 struct ll_disk old_ll;
31413diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
31414index 1cbfc6b..56e1dbb 100644
31415--- a/drivers/md/persistent-data/dm-space-map.h
31416+++ b/drivers/md/persistent-data/dm-space-map.h
31417@@ -60,6 +60,7 @@ struct dm_space_map {
31418 int (*root_size)(struct dm_space_map *sm, size_t *result);
31419 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
31420 };
31421+typedef struct dm_space_map __no_const dm_space_map_no_const;
31422
31423 /*----------------------------------------------------------------*/
31424
31425diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31426index 7d9e071..015b1d5 100644
31427--- a/drivers/md/raid1.c
31428+++ b/drivers/md/raid1.c
31429@@ -1568,7 +1568,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
31430 if (r1_sync_page_io(rdev, sect, s,
31431 bio->bi_io_vec[idx].bv_page,
31432 READ) != 0)
31433- atomic_add(s, &rdev->corrected_errors);
31434+ atomic_add_unchecked(s, &rdev->corrected_errors);
31435 }
31436 sectors -= s;
31437 sect += s;
31438@@ -1781,7 +1781,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
31439 test_bit(In_sync, &rdev->flags)) {
31440 if (r1_sync_page_io(rdev, sect, s,
31441 conf->tmppage, READ)) {
31442- atomic_add(s, &rdev->corrected_errors);
31443+ atomic_add_unchecked(s, &rdev->corrected_errors);
31444 printk(KERN_INFO
31445 "md/raid1:%s: read error corrected "
31446 "(%d sectors at %llu on %s)\n",
31447diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31448index 685ddf3..955b087 100644
31449--- a/drivers/md/raid10.c
31450+++ b/drivers/md/raid10.c
31451@@ -1440,7 +1440,7 @@ static void end_sync_read(struct bio *bio, int error)
31452 /* The write handler will notice the lack of
31453 * R10BIO_Uptodate and record any errors etc
31454 */
31455- atomic_add(r10_bio->sectors,
31456+ atomic_add_unchecked(r10_bio->sectors,
31457 &conf->mirrors[d].rdev->corrected_errors);
31458
31459 /* for reconstruct, we always reschedule after a read.
31460@@ -1740,7 +1740,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31461 {
31462 struct timespec cur_time_mon;
31463 unsigned long hours_since_last;
31464- unsigned int read_errors = atomic_read(&rdev->read_errors);
31465+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31466
31467 ktime_get_ts(&cur_time_mon);
31468
31469@@ -1762,9 +1762,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
31470 * overflowing the shift of read_errors by hours_since_last.
31471 */
31472 if (hours_since_last >= 8 * sizeof(read_errors))
31473- atomic_set(&rdev->read_errors, 0);
31474+ atomic_set_unchecked(&rdev->read_errors, 0);
31475 else
31476- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31477+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31478 }
31479
31480 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
31481@@ -1814,8 +1814,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31482 return;
31483
31484 check_decay_read_errors(mddev, rdev);
31485- atomic_inc(&rdev->read_errors);
31486- if (atomic_read(&rdev->read_errors) > max_read_errors) {
31487+ atomic_inc_unchecked(&rdev->read_errors);
31488+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31489 char b[BDEVNAME_SIZE];
31490 bdevname(rdev->bdev, b);
31491
31492@@ -1823,7 +1823,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31493 "md/raid10:%s: %s: Raid device exceeded "
31494 "read_error threshold [cur %d:max %d]\n",
31495 mdname(mddev), b,
31496- atomic_read(&rdev->read_errors), max_read_errors);
31497+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31498 printk(KERN_NOTICE
31499 "md/raid10:%s: %s: Failing raid device\n",
31500 mdname(mddev), b);
31501@@ -1968,7 +1968,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
31502 (unsigned long long)(
31503 sect + rdev->data_offset),
31504 bdevname(rdev->bdev, b));
31505- atomic_add(s, &rdev->corrected_errors);
31506+ atomic_add_unchecked(s, &rdev->corrected_errors);
31507 }
31508
31509 rdev_dec_pending(rdev, mddev);
31510diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31511index 858fdbb..b2dac95 100644
31512--- a/drivers/md/raid5.c
31513+++ b/drivers/md/raid5.c
31514@@ -1610,19 +1610,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31515 (unsigned long long)(sh->sector
31516 + rdev->data_offset),
31517 bdevname(rdev->bdev, b));
31518- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31519+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31520 clear_bit(R5_ReadError, &sh->dev[i].flags);
31521 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31522 }
31523- if (atomic_read(&conf->disks[i].rdev->read_errors))
31524- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31525+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31526+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31527 } else {
31528 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31529 int retry = 0;
31530 rdev = conf->disks[i].rdev;
31531
31532 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31533- atomic_inc(&rdev->read_errors);
31534+ atomic_inc_unchecked(&rdev->read_errors);
31535 if (conf->mddev->degraded >= conf->max_degraded)
31536 printk_ratelimited(
31537 KERN_WARNING
31538@@ -1642,7 +1642,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31539 (unsigned long long)(sh->sector
31540 + rdev->data_offset),
31541 bdn);
31542- else if (atomic_read(&rdev->read_errors)
31543+ else if (atomic_read_unchecked(&rdev->read_errors)
31544 > conf->max_nr_stripes)
31545 printk(KERN_WARNING
31546 "md/raid:%s: Too many read errors, failing device %s.\n",
31547diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31548index ba9a643..e474ab5 100644
31549--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31550+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31551@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
31552 .subvendor = _subvend, .subdevice = _subdev, \
31553 .driver_data = (unsigned long)&_driverdata }
31554
31555-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31556+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31557 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31558 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31559 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31560diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31561index a7d876f..8c21b61 100644
31562--- a/drivers/media/dvb/dvb-core/dvb_demux.h
31563+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31564@@ -73,7 +73,7 @@ struct dvb_demux_feed {
31565 union {
31566 dmx_ts_cb ts;
31567 dmx_section_cb sec;
31568- } cb;
31569+ } __no_const cb;
31570
31571 struct dvb_demux *demux;
31572 void *priv;
31573diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31574index f732877..d38c35a 100644
31575--- a/drivers/media/dvb/dvb-core/dvbdev.c
31576+++ b/drivers/media/dvb/dvb-core/dvbdev.c
31577@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31578 const struct dvb_device *template, void *priv, int type)
31579 {
31580 struct dvb_device *dvbdev;
31581- struct file_operations *dvbdevfops;
31582+ file_operations_no_const *dvbdevfops;
31583 struct device *clsdev;
31584 int minor;
31585 int id;
31586diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31587index 9f2a02c..5920f88 100644
31588--- a/drivers/media/dvb/dvb-usb/cxusb.c
31589+++ b/drivers/media/dvb/dvb-usb/cxusb.c
31590@@ -1069,7 +1069,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31591 struct dib0700_adapter_state {
31592 int (*set_param_save) (struct dvb_frontend *,
31593 struct dvb_frontend_parameters *);
31594-};
31595+} __no_const;
31596
31597 static int dib7070_set_param_override(struct dvb_frontend *fe,
31598 struct dvb_frontend_parameters *fep)
31599diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31600index f103ec1..5e8968b 100644
31601--- a/drivers/media/dvb/dvb-usb/dw2102.c
31602+++ b/drivers/media/dvb/dvb-usb/dw2102.c
31603@@ -95,7 +95,7 @@ struct su3000_state {
31604
31605 struct s6x0_state {
31606 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31607-};
31608+} __no_const;
31609
31610 /* debug */
31611 static int dvb_usb_dw2102_debug;
31612diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31613index 404f63a..4796533 100644
31614--- a/drivers/media/dvb/frontends/dib3000.h
31615+++ b/drivers/media/dvb/frontends/dib3000.h
31616@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31617 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31618 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31619 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31620-};
31621+} __no_const;
31622
31623 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31624 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31625diff --git a/drivers/media/dvb/frontends/ds3000.c b/drivers/media/dvb/frontends/ds3000.c
31626index 90bf573..e8463da 100644
31627--- a/drivers/media/dvb/frontends/ds3000.c
31628+++ b/drivers/media/dvb/frontends/ds3000.c
31629@@ -1210,7 +1210,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe,
31630
31631 for (i = 0; i < 30 ; i++) {
31632 ds3000_read_status(fe, &status);
31633- if (status && FE_HAS_LOCK)
31634+ if (status & FE_HAS_LOCK)
31635 break;
31636
31637 msleep(10);
31638diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31639index 0564192..75b16f5 100644
31640--- a/drivers/media/dvb/ngene/ngene-cards.c
31641+++ b/drivers/media/dvb/ngene/ngene-cards.c
31642@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31643
31644 /****************************************************************************/
31645
31646-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31647+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31648 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31649 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31650 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31651diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31652index 16a089f..ab1667d 100644
31653--- a/drivers/media/radio/radio-cadet.c
31654+++ b/drivers/media/radio/radio-cadet.c
31655@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31656 unsigned char readbuf[RDS_BUFFER];
31657 int i = 0;
31658
31659+ if (count > RDS_BUFFER)
31660+ return -EFAULT;
31661 mutex_lock(&dev->lock);
31662 if (dev->rdsstat == 0) {
31663 dev->rdsstat = 1;
31664diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
31665index 61287fc..8b08712 100644
31666--- a/drivers/media/rc/redrat3.c
31667+++ b/drivers/media/rc/redrat3.c
31668@@ -905,7 +905,7 @@ static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier)
31669 return carrier;
31670 }
31671
31672-static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
31673+static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, u32 n)
31674 {
31675 struct redrat3_dev *rr3 = rcdev->priv;
31676 struct device *dev = rr3->dev;
31677diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31678index 9cde353..8c6a1c3 100644
31679--- a/drivers/media/video/au0828/au0828.h
31680+++ b/drivers/media/video/au0828/au0828.h
31681@@ -191,7 +191,7 @@ struct au0828_dev {
31682
31683 /* I2C */
31684 struct i2c_adapter i2c_adap;
31685- struct i2c_algorithm i2c_algo;
31686+ i2c_algorithm_no_const i2c_algo;
31687 struct i2c_client i2c_client;
31688 u32 i2c_rc;
31689
31690diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31691index 68d1240..46b32eb 100644
31692--- a/drivers/media/video/cx88/cx88-alsa.c
31693+++ b/drivers/media/video/cx88/cx88-alsa.c
31694@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31695 * Only boards with eeprom and byte 1 at eeprom=1 have it
31696 */
31697
31698-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31699+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31700 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31701 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31702 {0, }
31703diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31704index 305e6aa..0143317 100644
31705--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31706+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31707@@ -196,7 +196,7 @@ struct pvr2_hdw {
31708
31709 /* I2C stuff */
31710 struct i2c_adapter i2c_adap;
31711- struct i2c_algorithm i2c_algo;
31712+ i2c_algorithm_no_const i2c_algo;
31713 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31714 int i2c_cx25840_hack_state;
31715 int i2c_linked;
31716diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31717index a0895bf..b7ebb1b 100644
31718--- a/drivers/media/video/timblogiw.c
31719+++ b/drivers/media/video/timblogiw.c
31720@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31721
31722 /* Platform device functions */
31723
31724-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31725+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31726 .vidioc_querycap = timblogiw_querycap,
31727 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31728 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31729@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31730 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31731 };
31732
31733-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31734+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31735 .owner = THIS_MODULE,
31736 .open = timblogiw_open,
31737 .release = timblogiw_close,
31738diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31739index e9c6a60..daf6a33 100644
31740--- a/drivers/message/fusion/mptbase.c
31741+++ b/drivers/message/fusion/mptbase.c
31742@@ -6753,8 +6753,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31743 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31744 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31745
31746+#ifdef CONFIG_GRKERNSEC_HIDESYM
31747+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31748+#else
31749 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31750 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31751+#endif
31752+
31753 /*
31754 * Rounding UP to nearest 4-kB boundary here...
31755 */
31756diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31757index 9d95042..b808101 100644
31758--- a/drivers/message/fusion/mptsas.c
31759+++ b/drivers/message/fusion/mptsas.c
31760@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31761 return 0;
31762 }
31763
31764+static inline void
31765+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31766+{
31767+ if (phy_info->port_details) {
31768+ phy_info->port_details->rphy = rphy;
31769+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31770+ ioc->name, rphy));
31771+ }
31772+
31773+ if (rphy) {
31774+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31775+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31776+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31777+ ioc->name, rphy, rphy->dev.release));
31778+ }
31779+}
31780+
31781 /* no mutex */
31782 static void
31783 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31784@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31785 return NULL;
31786 }
31787
31788-static inline void
31789-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31790-{
31791- if (phy_info->port_details) {
31792- phy_info->port_details->rphy = rphy;
31793- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31794- ioc->name, rphy));
31795- }
31796-
31797- if (rphy) {
31798- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31799- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31800- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31801- ioc->name, rphy, rphy->dev.release));
31802- }
31803-}
31804-
31805 static inline struct sas_port *
31806 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31807 {
31808diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31809index 0c3ced7..1fe34ec 100644
31810--- a/drivers/message/fusion/mptscsih.c
31811+++ b/drivers/message/fusion/mptscsih.c
31812@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31813
31814 h = shost_priv(SChost);
31815
31816- if (h) {
31817- if (h->info_kbuf == NULL)
31818- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31819- return h->info_kbuf;
31820- h->info_kbuf[0] = '\0';
31821+ if (!h)
31822+ return NULL;
31823
31824- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31825- h->info_kbuf[size-1] = '\0';
31826- }
31827+ if (h->info_kbuf == NULL)
31828+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31829+ return h->info_kbuf;
31830+ h->info_kbuf[0] = '\0';
31831+
31832+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31833+ h->info_kbuf[size-1] = '\0';
31834
31835 return h->info_kbuf;
31836 }
31837diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
31838index 07dbeaf..5533142 100644
31839--- a/drivers/message/i2o/i2o_proc.c
31840+++ b/drivers/message/i2o/i2o_proc.c
31841@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31842 "Array Controller Device"
31843 };
31844
31845-static char *chtostr(u8 * chars, int n)
31846-{
31847- char tmp[256];
31848- tmp[0] = 0;
31849- return strncat(tmp, (char *)chars, n);
31850-}
31851-
31852 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31853 char *group)
31854 {
31855@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
31856
31857 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31858 seq_printf(seq, "%-#8x", ddm_table.module_id);
31859- seq_printf(seq, "%-29s",
31860- chtostr(ddm_table.module_name_version, 28));
31861+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31862 seq_printf(seq, "%9d ", ddm_table.data_size);
31863 seq_printf(seq, "%8d", ddm_table.code_size);
31864
31865@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
31866
31867 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31868 seq_printf(seq, "%-#8x", dst->module_id);
31869- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31870- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31871+ seq_printf(seq, "%-.28s", dst->module_name_version);
31872+ seq_printf(seq, "%-.8s", dst->date);
31873 seq_printf(seq, "%8d ", dst->module_size);
31874 seq_printf(seq, "%8d ", dst->mpb_size);
31875 seq_printf(seq, "0x%04x", dst->module_flags);
31876@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
31877 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31878 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31879 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31880- seq_printf(seq, "Vendor info : %s\n",
31881- chtostr((u8 *) (work32 + 2), 16));
31882- seq_printf(seq, "Product info : %s\n",
31883- chtostr((u8 *) (work32 + 6), 16));
31884- seq_printf(seq, "Description : %s\n",
31885- chtostr((u8 *) (work32 + 10), 16));
31886- seq_printf(seq, "Product rev. : %s\n",
31887- chtostr((u8 *) (work32 + 14), 8));
31888+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31889+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31890+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31891+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31892
31893 seq_printf(seq, "Serial number : ");
31894 print_serial_number(seq, (u8 *) (work32 + 16),
31895@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
31896 }
31897
31898 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31899- seq_printf(seq, "Module name : %s\n",
31900- chtostr(result.module_name, 24));
31901- seq_printf(seq, "Module revision : %s\n",
31902- chtostr(result.module_rev, 8));
31903+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
31904+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31905
31906 seq_printf(seq, "Serial number : ");
31907 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31908@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
31909 return 0;
31910 }
31911
31912- seq_printf(seq, "Device name : %s\n",
31913- chtostr(result.device_name, 64));
31914- seq_printf(seq, "Service name : %s\n",
31915- chtostr(result.service_name, 64));
31916- seq_printf(seq, "Physical name : %s\n",
31917- chtostr(result.physical_location, 64));
31918- seq_printf(seq, "Instance number : %s\n",
31919- chtostr(result.instance_number, 4));
31920+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
31921+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
31922+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31923+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31924
31925 return 0;
31926 }
31927diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
31928index a8c08f3..155fe3d 100644
31929--- a/drivers/message/i2o/iop.c
31930+++ b/drivers/message/i2o/iop.c
31931@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
31932
31933 spin_lock_irqsave(&c->context_list_lock, flags);
31934
31935- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31936- atomic_inc(&c->context_list_counter);
31937+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31938+ atomic_inc_unchecked(&c->context_list_counter);
31939
31940- entry->context = atomic_read(&c->context_list_counter);
31941+ entry->context = atomic_read_unchecked(&c->context_list_counter);
31942
31943 list_add(&entry->list, &c->context_list);
31944
31945@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
31946
31947 #if BITS_PER_LONG == 64
31948 spin_lock_init(&c->context_list_lock);
31949- atomic_set(&c->context_list_counter, 0);
31950+ atomic_set_unchecked(&c->context_list_counter, 0);
31951 INIT_LIST_HEAD(&c->context_list);
31952 #endif
31953
31954diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
31955index 7ce65f4..e66e9bc 100644
31956--- a/drivers/mfd/abx500-core.c
31957+++ b/drivers/mfd/abx500-core.c
31958@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
31959
31960 struct abx500_device_entry {
31961 struct list_head list;
31962- struct abx500_ops ops;
31963+ abx500_ops_no_const ops;
31964 struct device *dev;
31965 };
31966
31967diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
31968index 5c2a06a..8fa077c 100644
31969--- a/drivers/mfd/janz-cmodio.c
31970+++ b/drivers/mfd/janz-cmodio.c
31971@@ -13,6 +13,7 @@
31972
31973 #include <linux/kernel.h>
31974 #include <linux/module.h>
31975+#include <linux/slab.h>
31976 #include <linux/init.h>
31977 #include <linux/pci.h>
31978 #include <linux/interrupt.h>
31979diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
31980index 29d12a7..f900ba4 100644
31981--- a/drivers/misc/lis3lv02d/lis3lv02d.c
31982+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
31983@@ -464,7 +464,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
31984 * the lid is closed. This leads to interrupts as soon as a little move
31985 * is done.
31986 */
31987- atomic_inc(&lis3->count);
31988+ atomic_inc_unchecked(&lis3->count);
31989
31990 wake_up_interruptible(&lis3->misc_wait);
31991 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
31992@@ -550,7 +550,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
31993 if (lis3->pm_dev)
31994 pm_runtime_get_sync(lis3->pm_dev);
31995
31996- atomic_set(&lis3->count, 0);
31997+ atomic_set_unchecked(&lis3->count, 0);
31998 return 0;
31999 }
32000
32001@@ -583,7 +583,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32002 add_wait_queue(&lis3->misc_wait, &wait);
32003 while (true) {
32004 set_current_state(TASK_INTERRUPTIBLE);
32005- data = atomic_xchg(&lis3->count, 0);
32006+ data = atomic_xchg_unchecked(&lis3->count, 0);
32007 if (data)
32008 break;
32009
32010@@ -624,7 +624,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32011 struct lis3lv02d, miscdev);
32012
32013 poll_wait(file, &lis3->misc_wait, wait);
32014- if (atomic_read(&lis3->count))
32015+ if (atomic_read_unchecked(&lis3->count))
32016 return POLLIN | POLLRDNORM;
32017 return 0;
32018 }
32019diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32020index 2b1482a..5d33616 100644
32021--- a/drivers/misc/lis3lv02d/lis3lv02d.h
32022+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32023@@ -266,7 +266,7 @@ struct lis3lv02d {
32024 struct input_polled_dev *idev; /* input device */
32025 struct platform_device *pdev; /* platform device */
32026 struct regulator_bulk_data regulators[2];
32027- atomic_t count; /* interrupt count after last read */
32028+ atomic_unchecked_t count; /* interrupt count after last read */
32029 union axis_conversion ac; /* hw -> logical axis */
32030 int mapped_btns[3];
32031
32032diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32033index 2f30bad..c4c13d0 100644
32034--- a/drivers/misc/sgi-gru/gruhandles.c
32035+++ b/drivers/misc/sgi-gru/gruhandles.c
32036@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32037 unsigned long nsec;
32038
32039 nsec = CLKS2NSEC(clks);
32040- atomic_long_inc(&mcs_op_statistics[op].count);
32041- atomic_long_add(nsec, &mcs_op_statistics[op].total);
32042+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32043+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32044 if (mcs_op_statistics[op].max < nsec)
32045 mcs_op_statistics[op].max = nsec;
32046 }
32047diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32048index 7768b87..f8aac38 100644
32049--- a/drivers/misc/sgi-gru/gruprocfs.c
32050+++ b/drivers/misc/sgi-gru/gruprocfs.c
32051@@ -32,9 +32,9 @@
32052
32053 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32054
32055-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32056+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32057 {
32058- unsigned long val = atomic_long_read(v);
32059+ unsigned long val = atomic_long_read_unchecked(v);
32060
32061 seq_printf(s, "%16lu %s\n", val, id);
32062 }
32063@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32064
32065 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32066 for (op = 0; op < mcsop_last; op++) {
32067- count = atomic_long_read(&mcs_op_statistics[op].count);
32068- total = atomic_long_read(&mcs_op_statistics[op].total);
32069+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32070+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32071 max = mcs_op_statistics[op].max;
32072 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32073 count ? total / count : 0, max);
32074diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32075index 5c3ce24..4915ccb 100644
32076--- a/drivers/misc/sgi-gru/grutables.h
32077+++ b/drivers/misc/sgi-gru/grutables.h
32078@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32079 * GRU statistics.
32080 */
32081 struct gru_stats_s {
32082- atomic_long_t vdata_alloc;
32083- atomic_long_t vdata_free;
32084- atomic_long_t gts_alloc;
32085- atomic_long_t gts_free;
32086- atomic_long_t gms_alloc;
32087- atomic_long_t gms_free;
32088- atomic_long_t gts_double_allocate;
32089- atomic_long_t assign_context;
32090- atomic_long_t assign_context_failed;
32091- atomic_long_t free_context;
32092- atomic_long_t load_user_context;
32093- atomic_long_t load_kernel_context;
32094- atomic_long_t lock_kernel_context;
32095- atomic_long_t unlock_kernel_context;
32096- atomic_long_t steal_user_context;
32097- atomic_long_t steal_kernel_context;
32098- atomic_long_t steal_context_failed;
32099- atomic_long_t nopfn;
32100- atomic_long_t asid_new;
32101- atomic_long_t asid_next;
32102- atomic_long_t asid_wrap;
32103- atomic_long_t asid_reuse;
32104- atomic_long_t intr;
32105- atomic_long_t intr_cbr;
32106- atomic_long_t intr_tfh;
32107- atomic_long_t intr_spurious;
32108- atomic_long_t intr_mm_lock_failed;
32109- atomic_long_t call_os;
32110- atomic_long_t call_os_wait_queue;
32111- atomic_long_t user_flush_tlb;
32112- atomic_long_t user_unload_context;
32113- atomic_long_t user_exception;
32114- atomic_long_t set_context_option;
32115- atomic_long_t check_context_retarget_intr;
32116- atomic_long_t check_context_unload;
32117- atomic_long_t tlb_dropin;
32118- atomic_long_t tlb_preload_page;
32119- atomic_long_t tlb_dropin_fail_no_asid;
32120- atomic_long_t tlb_dropin_fail_upm;
32121- atomic_long_t tlb_dropin_fail_invalid;
32122- atomic_long_t tlb_dropin_fail_range_active;
32123- atomic_long_t tlb_dropin_fail_idle;
32124- atomic_long_t tlb_dropin_fail_fmm;
32125- atomic_long_t tlb_dropin_fail_no_exception;
32126- atomic_long_t tfh_stale_on_fault;
32127- atomic_long_t mmu_invalidate_range;
32128- atomic_long_t mmu_invalidate_page;
32129- atomic_long_t flush_tlb;
32130- atomic_long_t flush_tlb_gru;
32131- atomic_long_t flush_tlb_gru_tgh;
32132- atomic_long_t flush_tlb_gru_zero_asid;
32133+ atomic_long_unchecked_t vdata_alloc;
32134+ atomic_long_unchecked_t vdata_free;
32135+ atomic_long_unchecked_t gts_alloc;
32136+ atomic_long_unchecked_t gts_free;
32137+ atomic_long_unchecked_t gms_alloc;
32138+ atomic_long_unchecked_t gms_free;
32139+ atomic_long_unchecked_t gts_double_allocate;
32140+ atomic_long_unchecked_t assign_context;
32141+ atomic_long_unchecked_t assign_context_failed;
32142+ atomic_long_unchecked_t free_context;
32143+ atomic_long_unchecked_t load_user_context;
32144+ atomic_long_unchecked_t load_kernel_context;
32145+ atomic_long_unchecked_t lock_kernel_context;
32146+ atomic_long_unchecked_t unlock_kernel_context;
32147+ atomic_long_unchecked_t steal_user_context;
32148+ atomic_long_unchecked_t steal_kernel_context;
32149+ atomic_long_unchecked_t steal_context_failed;
32150+ atomic_long_unchecked_t nopfn;
32151+ atomic_long_unchecked_t asid_new;
32152+ atomic_long_unchecked_t asid_next;
32153+ atomic_long_unchecked_t asid_wrap;
32154+ atomic_long_unchecked_t asid_reuse;
32155+ atomic_long_unchecked_t intr;
32156+ atomic_long_unchecked_t intr_cbr;
32157+ atomic_long_unchecked_t intr_tfh;
32158+ atomic_long_unchecked_t intr_spurious;
32159+ atomic_long_unchecked_t intr_mm_lock_failed;
32160+ atomic_long_unchecked_t call_os;
32161+ atomic_long_unchecked_t call_os_wait_queue;
32162+ atomic_long_unchecked_t user_flush_tlb;
32163+ atomic_long_unchecked_t user_unload_context;
32164+ atomic_long_unchecked_t user_exception;
32165+ atomic_long_unchecked_t set_context_option;
32166+ atomic_long_unchecked_t check_context_retarget_intr;
32167+ atomic_long_unchecked_t check_context_unload;
32168+ atomic_long_unchecked_t tlb_dropin;
32169+ atomic_long_unchecked_t tlb_preload_page;
32170+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32171+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32172+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32173+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32174+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32175+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32176+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32177+ atomic_long_unchecked_t tfh_stale_on_fault;
32178+ atomic_long_unchecked_t mmu_invalidate_range;
32179+ atomic_long_unchecked_t mmu_invalidate_page;
32180+ atomic_long_unchecked_t flush_tlb;
32181+ atomic_long_unchecked_t flush_tlb_gru;
32182+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32183+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32184
32185- atomic_long_t copy_gpa;
32186- atomic_long_t read_gpa;
32187+ atomic_long_unchecked_t copy_gpa;
32188+ atomic_long_unchecked_t read_gpa;
32189
32190- atomic_long_t mesq_receive;
32191- atomic_long_t mesq_receive_none;
32192- atomic_long_t mesq_send;
32193- atomic_long_t mesq_send_failed;
32194- atomic_long_t mesq_noop;
32195- atomic_long_t mesq_send_unexpected_error;
32196- atomic_long_t mesq_send_lb_overflow;
32197- atomic_long_t mesq_send_qlimit_reached;
32198- atomic_long_t mesq_send_amo_nacked;
32199- atomic_long_t mesq_send_put_nacked;
32200- atomic_long_t mesq_page_overflow;
32201- atomic_long_t mesq_qf_locked;
32202- atomic_long_t mesq_qf_noop_not_full;
32203- atomic_long_t mesq_qf_switch_head_failed;
32204- atomic_long_t mesq_qf_unexpected_error;
32205- atomic_long_t mesq_noop_unexpected_error;
32206- atomic_long_t mesq_noop_lb_overflow;
32207- atomic_long_t mesq_noop_qlimit_reached;
32208- atomic_long_t mesq_noop_amo_nacked;
32209- atomic_long_t mesq_noop_put_nacked;
32210- atomic_long_t mesq_noop_page_overflow;
32211+ atomic_long_unchecked_t mesq_receive;
32212+ atomic_long_unchecked_t mesq_receive_none;
32213+ atomic_long_unchecked_t mesq_send;
32214+ atomic_long_unchecked_t mesq_send_failed;
32215+ atomic_long_unchecked_t mesq_noop;
32216+ atomic_long_unchecked_t mesq_send_unexpected_error;
32217+ atomic_long_unchecked_t mesq_send_lb_overflow;
32218+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32219+ atomic_long_unchecked_t mesq_send_amo_nacked;
32220+ atomic_long_unchecked_t mesq_send_put_nacked;
32221+ atomic_long_unchecked_t mesq_page_overflow;
32222+ atomic_long_unchecked_t mesq_qf_locked;
32223+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32224+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32225+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32226+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32227+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32228+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32229+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32230+ atomic_long_unchecked_t mesq_noop_put_nacked;
32231+ atomic_long_unchecked_t mesq_noop_page_overflow;
32232
32233 };
32234
32235@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32236 tghop_invalidate, mcsop_last};
32237
32238 struct mcs_op_statistic {
32239- atomic_long_t count;
32240- atomic_long_t total;
32241+ atomic_long_unchecked_t count;
32242+ atomic_long_unchecked_t total;
32243 unsigned long max;
32244 };
32245
32246@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32247
32248 #define STAT(id) do { \
32249 if (gru_options & OPT_STATS) \
32250- atomic_long_inc(&gru_stats.id); \
32251+ atomic_long_inc_unchecked(&gru_stats.id); \
32252 } while (0)
32253
32254 #ifdef CONFIG_SGI_GRU_DEBUG
32255diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32256index 851b2f2..a4ec097 100644
32257--- a/drivers/misc/sgi-xp/xp.h
32258+++ b/drivers/misc/sgi-xp/xp.h
32259@@ -289,7 +289,7 @@ struct xpc_interface {
32260 xpc_notify_func, void *);
32261 void (*received) (short, int, void *);
32262 enum xp_retval (*partid_to_nasids) (short, void *);
32263-};
32264+} __no_const;
32265
32266 extern struct xpc_interface xpc_interface;
32267
32268diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32269index b94d5f7..7f494c5 100644
32270--- a/drivers/misc/sgi-xp/xpc.h
32271+++ b/drivers/misc/sgi-xp/xpc.h
32272@@ -835,6 +835,7 @@ struct xpc_arch_operations {
32273 void (*received_payload) (struct xpc_channel *, void *);
32274 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32275 };
32276+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32277
32278 /* struct xpc_partition act_state values (for XPC HB) */
32279
32280@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32281 /* found in xpc_main.c */
32282 extern struct device *xpc_part;
32283 extern struct device *xpc_chan;
32284-extern struct xpc_arch_operations xpc_arch_ops;
32285+extern xpc_arch_operations_no_const xpc_arch_ops;
32286 extern int xpc_disengage_timelimit;
32287 extern int xpc_disengage_timedout;
32288 extern int xpc_activate_IRQ_rcvd;
32289diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32290index 8d082b4..aa749ae 100644
32291--- a/drivers/misc/sgi-xp/xpc_main.c
32292+++ b/drivers/misc/sgi-xp/xpc_main.c
32293@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32294 .notifier_call = xpc_system_die,
32295 };
32296
32297-struct xpc_arch_operations xpc_arch_ops;
32298+xpc_arch_operations_no_const xpc_arch_ops;
32299
32300 /*
32301 * Timer function to enforce the timelimit on the partition disengage.
32302diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32303index 6878a94..fe5c5f1 100644
32304--- a/drivers/mmc/host/sdhci-pci.c
32305+++ b/drivers/mmc/host/sdhci-pci.c
32306@@ -673,7 +673,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32307 .probe = via_probe,
32308 };
32309
32310-static const struct pci_device_id pci_ids[] __devinitdata = {
32311+static const struct pci_device_id pci_ids[] __devinitconst = {
32312 {
32313 .vendor = PCI_VENDOR_ID_RICOH,
32314 .device = PCI_DEVICE_ID_RICOH_R5C822,
32315diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32316index e9fad91..0a7a16a 100644
32317--- a/drivers/mtd/devices/doc2000.c
32318+++ b/drivers/mtd/devices/doc2000.c
32319@@ -773,7 +773,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32320
32321 /* The ECC will not be calculated correctly if less than 512 is written */
32322 /* DBB-
32323- if (len != 0x200 && eccbuf)
32324+ if (len != 0x200)
32325 printk(KERN_WARNING
32326 "ECC needs a full sector write (adr: %lx size %lx)\n",
32327 (long) to, (long) len);
32328diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32329index a3f7a27..234016e 100644
32330--- a/drivers/mtd/devices/doc2001.c
32331+++ b/drivers/mtd/devices/doc2001.c
32332@@ -392,7 +392,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32333 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32334
32335 /* Don't allow read past end of device */
32336- if (from >= this->totlen)
32337+ if (from >= this->totlen || !len)
32338 return -EINVAL;
32339
32340 /* Don't allow a single read to cross a 512-byte block boundary */
32341diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32342index 3984d48..28aa897 100644
32343--- a/drivers/mtd/nand/denali.c
32344+++ b/drivers/mtd/nand/denali.c
32345@@ -26,6 +26,7 @@
32346 #include <linux/pci.h>
32347 #include <linux/mtd/mtd.h>
32348 #include <linux/module.h>
32349+#include <linux/slab.h>
32350
32351 #include "denali.h"
32352
32353diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32354index ac40925..483b753 100644
32355--- a/drivers/mtd/nftlmount.c
32356+++ b/drivers/mtd/nftlmount.c
32357@@ -24,6 +24,7 @@
32358 #include <asm/errno.h>
32359 #include <linux/delay.h>
32360 #include <linux/slab.h>
32361+#include <linux/sched.h>
32362 #include <linux/mtd/mtd.h>
32363 #include <linux/mtd/nand.h>
32364 #include <linux/mtd/nftl.h>
32365diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32366index 6c3fb5a..c542a81 100644
32367--- a/drivers/mtd/ubi/build.c
32368+++ b/drivers/mtd/ubi/build.c
32369@@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32370 static int __init bytes_str_to_int(const char *str)
32371 {
32372 char *endp;
32373- unsigned long result;
32374+ unsigned long result, scale = 1;
32375
32376 result = simple_strtoul(str, &endp, 0);
32377 if (str == endp || result >= INT_MAX) {
32378@@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32379
32380 switch (*endp) {
32381 case 'G':
32382- result *= 1024;
32383+ scale *= 1024;
32384 case 'M':
32385- result *= 1024;
32386+ scale *= 1024;
32387 case 'K':
32388- result *= 1024;
32389+ scale *= 1024;
32390 if (endp[1] == 'i' && endp[2] == 'B')
32391 endp += 2;
32392 case '\0':
32393@@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32394 return -EINVAL;
32395 }
32396
32397- return result;
32398+ if ((intoverflow_t)result*scale >= INT_MAX) {
32399+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32400+ str);
32401+ return -EINVAL;
32402+ }
32403+
32404+ return result*scale;
32405 }
32406
32407 /**
32408diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
32409index 1feae59..c2a61d2 100644
32410--- a/drivers/net/ethernet/atheros/atlx/atl2.c
32411+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
32412@@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32413 */
32414
32415 #define ATL2_PARAM(X, desc) \
32416- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32417+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32418 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32419 MODULE_PARM_DESC(X, desc);
32420 #else
32421diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32422index 9a517c2..a50cfcb 100644
32423--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32424+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
32425@@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32426
32427 int (*wait_comp)(struct bnx2x *bp,
32428 struct bnx2x_rx_mode_ramrod_params *p);
32429-};
32430+} __no_const;
32431
32432 /********************** Set multicast group ***********************************/
32433
32434diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
32435index 94b4bd0..73c02de 100644
32436--- a/drivers/net/ethernet/broadcom/tg3.h
32437+++ b/drivers/net/ethernet/broadcom/tg3.h
32438@@ -134,6 +134,7 @@
32439 #define CHIPREV_ID_5750_A0 0x4000
32440 #define CHIPREV_ID_5750_A1 0x4001
32441 #define CHIPREV_ID_5750_A3 0x4003
32442+#define CHIPREV_ID_5750_C1 0x4201
32443 #define CHIPREV_ID_5750_C2 0x4202
32444 #define CHIPREV_ID_5752_A0_HW 0x5000
32445 #define CHIPREV_ID_5752_A0 0x6000
32446diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32447index c5f5479..2e8c260 100644
32448--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32449+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
32450@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32451 */
32452 struct l2t_skb_cb {
32453 arp_failure_handler_func arp_failure_handler;
32454-};
32455+} __no_const;
32456
32457 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32458
32459diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
32460index 871bcaa..4043505 100644
32461--- a/drivers/net/ethernet/dec/tulip/de4x5.c
32462+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
32463@@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32464 for (i=0; i<ETH_ALEN; i++) {
32465 tmp.addr[i] = dev->dev_addr[i];
32466 }
32467- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32468+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32469 break;
32470
32471 case DE4X5_SET_HWADDR: /* Set the hardware address */
32472@@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
32473 spin_lock_irqsave(&lp->lock, flags);
32474 memcpy(&statbuf, &lp->pktStats, ioc->len);
32475 spin_unlock_irqrestore(&lp->lock, flags);
32476- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32477+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32478 return -EFAULT;
32479 break;
32480 }
32481diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
32482index 14d5b61..1398636 100644
32483--- a/drivers/net/ethernet/dec/tulip/eeprom.c
32484+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
32485@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
32486 {NULL}};
32487
32488
32489-static const char *block_name[] __devinitdata = {
32490+static const char *block_name[] __devinitconst = {
32491 "21140 non-MII",
32492 "21140 MII PHY",
32493 "21142 Serial PHY",
32494diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
32495index 4d01219..b58d26d 100644
32496--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
32497+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
32498@@ -236,7 +236,7 @@ struct pci_id_info {
32499 int drv_flags; /* Driver use, intended as capability flags. */
32500 };
32501
32502-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32503+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32504 { /* Sometime a Level-One switch card. */
32505 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
32506 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
32507diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
32508index dcd7f7a..ecb7fb3 100644
32509--- a/drivers/net/ethernet/dlink/sundance.c
32510+++ b/drivers/net/ethernet/dlink/sundance.c
32511@@ -218,7 +218,7 @@ enum {
32512 struct pci_id_info {
32513 const char *name;
32514 };
32515-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
32516+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
32517 {"D-Link DFE-550TX FAST Ethernet Adapter"},
32518 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
32519 {"D-Link DFE-580TX 4 port Server Adapter"},
32520diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
32521index bf266a0..e024af7 100644
32522--- a/drivers/net/ethernet/emulex/benet/be_main.c
32523+++ b/drivers/net/ethernet/emulex/benet/be_main.c
32524@@ -397,7 +397,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
32525
32526 if (wrapped)
32527 newacc += 65536;
32528- ACCESS_ONCE(*acc) = newacc;
32529+ ACCESS_ONCE_RW(*acc) = newacc;
32530 }
32531
32532 void be_parse_stats(struct be_adapter *adapter)
32533diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
32534index 61d2bdd..7f1154a 100644
32535--- a/drivers/net/ethernet/fealnx.c
32536+++ b/drivers/net/ethernet/fealnx.c
32537@@ -150,7 +150,7 @@ struct chip_info {
32538 int flags;
32539 };
32540
32541-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32542+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32543 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32544 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32545 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32546diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32547index e1159e5..e18684d 100644
32548--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32549+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
32550@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32551 {
32552 struct e1000_hw *hw = &adapter->hw;
32553 struct e1000_mac_info *mac = &hw->mac;
32554- struct e1000_mac_operations *func = &mac->ops;
32555+ e1000_mac_operations_no_const *func = &mac->ops;
32556
32557 /* Set media type */
32558 switch (adapter->pdev->device) {
32559diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
32560index a3e65fd..f451444 100644
32561--- a/drivers/net/ethernet/intel/e1000e/82571.c
32562+++ b/drivers/net/ethernet/intel/e1000e/82571.c
32563@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32564 {
32565 struct e1000_hw *hw = &adapter->hw;
32566 struct e1000_mac_info *mac = &hw->mac;
32567- struct e1000_mac_operations *func = &mac->ops;
32568+ e1000_mac_operations_no_const *func = &mac->ops;
32569 u32 swsm = 0;
32570 u32 swsm2 = 0;
32571 bool force_clear_smbi = false;
32572diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
32573index 2967039..ca8c40c 100644
32574--- a/drivers/net/ethernet/intel/e1000e/hw.h
32575+++ b/drivers/net/ethernet/intel/e1000e/hw.h
32576@@ -778,6 +778,7 @@ struct e1000_mac_operations {
32577 void (*write_vfta)(struct e1000_hw *, u32, u32);
32578 s32 (*read_mac_addr)(struct e1000_hw *);
32579 };
32580+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32581
32582 /*
32583 * When to use various PHY register access functions:
32584@@ -818,6 +819,7 @@ struct e1000_phy_operations {
32585 void (*power_up)(struct e1000_hw *);
32586 void (*power_down)(struct e1000_hw *);
32587 };
32588+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32589
32590 /* Function pointers for the NVM. */
32591 struct e1000_nvm_operations {
32592@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32593 s32 (*validate)(struct e1000_hw *);
32594 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32595 };
32596+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32597
32598 struct e1000_mac_info {
32599- struct e1000_mac_operations ops;
32600+ e1000_mac_operations_no_const ops;
32601 u8 addr[ETH_ALEN];
32602 u8 perm_addr[ETH_ALEN];
32603
32604@@ -872,7 +875,7 @@ struct e1000_mac_info {
32605 };
32606
32607 struct e1000_phy_info {
32608- struct e1000_phy_operations ops;
32609+ e1000_phy_operations_no_const ops;
32610
32611 enum e1000_phy_type type;
32612
32613@@ -906,7 +909,7 @@ struct e1000_phy_info {
32614 };
32615
32616 struct e1000_nvm_info {
32617- struct e1000_nvm_operations ops;
32618+ e1000_nvm_operations_no_const ops;
32619
32620 enum e1000_nvm_type type;
32621 enum e1000_nvm_override override;
32622diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
32623index 4519a13..f97fcd0 100644
32624--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
32625+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
32626@@ -314,6 +314,7 @@ struct e1000_mac_operations {
32627 s32 (*read_mac_addr)(struct e1000_hw *);
32628 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32629 };
32630+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32631
32632 struct e1000_phy_operations {
32633 s32 (*acquire)(struct e1000_hw *);
32634@@ -330,6 +331,7 @@ struct e1000_phy_operations {
32635 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32636 s32 (*write_reg)(struct e1000_hw *, u32, u16);
32637 };
32638+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32639
32640 struct e1000_nvm_operations {
32641 s32 (*acquire)(struct e1000_hw *);
32642@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
32643 s32 (*update)(struct e1000_hw *);
32644 s32 (*validate)(struct e1000_hw *);
32645 };
32646+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32647
32648 struct e1000_info {
32649 s32 (*get_invariants)(struct e1000_hw *);
32650@@ -350,7 +353,7 @@ struct e1000_info {
32651 extern const struct e1000_info e1000_82575_info;
32652
32653 struct e1000_mac_info {
32654- struct e1000_mac_operations ops;
32655+ e1000_mac_operations_no_const ops;
32656
32657 u8 addr[6];
32658 u8 perm_addr[6];
32659@@ -388,7 +391,7 @@ struct e1000_mac_info {
32660 };
32661
32662 struct e1000_phy_info {
32663- struct e1000_phy_operations ops;
32664+ e1000_phy_operations_no_const ops;
32665
32666 enum e1000_phy_type type;
32667
32668@@ -423,7 +426,7 @@ struct e1000_phy_info {
32669 };
32670
32671 struct e1000_nvm_info {
32672- struct e1000_nvm_operations ops;
32673+ e1000_nvm_operations_no_const ops;
32674 enum e1000_nvm_type type;
32675 enum e1000_nvm_override override;
32676
32677@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
32678 s32 (*check_for_ack)(struct e1000_hw *, u16);
32679 s32 (*check_for_rst)(struct e1000_hw *, u16);
32680 };
32681+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32682
32683 struct e1000_mbx_stats {
32684 u32 msgs_tx;
32685@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
32686 };
32687
32688 struct e1000_mbx_info {
32689- struct e1000_mbx_operations ops;
32690+ e1000_mbx_operations_no_const ops;
32691 struct e1000_mbx_stats stats;
32692 u32 timeout;
32693 u32 usec_delay;
32694diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
32695index d7ed58f..64cde36 100644
32696--- a/drivers/net/ethernet/intel/igbvf/vf.h
32697+++ b/drivers/net/ethernet/intel/igbvf/vf.h
32698@@ -189,9 +189,10 @@ struct e1000_mac_operations {
32699 s32 (*read_mac_addr)(struct e1000_hw *);
32700 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
32701 };
32702+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32703
32704 struct e1000_mac_info {
32705- struct e1000_mac_operations ops;
32706+ e1000_mac_operations_no_const ops;
32707 u8 addr[6];
32708 u8 perm_addr[6];
32709
32710@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
32711 s32 (*check_for_ack)(struct e1000_hw *);
32712 s32 (*check_for_rst)(struct e1000_hw *);
32713 };
32714+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
32715
32716 struct e1000_mbx_stats {
32717 u32 msgs_tx;
32718@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
32719 };
32720
32721 struct e1000_mbx_info {
32722- struct e1000_mbx_operations ops;
32723+ e1000_mbx_operations_no_const ops;
32724 struct e1000_mbx_stats stats;
32725 u32 timeout;
32726 u32 usec_delay;
32727diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32728index 6c5cca8..de8ef63 100644
32729--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32730+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
32731@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
32732 s32 (*update_checksum)(struct ixgbe_hw *);
32733 u16 (*calc_checksum)(struct ixgbe_hw *);
32734 };
32735+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
32736
32737 struct ixgbe_mac_operations {
32738 s32 (*init_hw)(struct ixgbe_hw *);
32739@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
32740 /* Manageability interface */
32741 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
32742 };
32743+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32744
32745 struct ixgbe_phy_operations {
32746 s32 (*identify)(struct ixgbe_hw *);
32747@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
32748 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
32749 s32 (*check_overtemp)(struct ixgbe_hw *);
32750 };
32751+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
32752
32753 struct ixgbe_eeprom_info {
32754- struct ixgbe_eeprom_operations ops;
32755+ ixgbe_eeprom_operations_no_const ops;
32756 enum ixgbe_eeprom_type type;
32757 u32 semaphore_delay;
32758 u16 word_size;
32759@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
32760
32761 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
32762 struct ixgbe_mac_info {
32763- struct ixgbe_mac_operations ops;
32764+ ixgbe_mac_operations_no_const ops;
32765 enum ixgbe_mac_type type;
32766 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32767 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
32768@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
32769 };
32770
32771 struct ixgbe_phy_info {
32772- struct ixgbe_phy_operations ops;
32773+ ixgbe_phy_operations_no_const ops;
32774 struct mdio_if_info mdio;
32775 enum ixgbe_phy_type type;
32776 u32 id;
32777@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
32778 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
32779 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
32780 };
32781+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32782
32783 struct ixgbe_mbx_stats {
32784 u32 msgs_tx;
32785@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
32786 };
32787
32788 struct ixgbe_mbx_info {
32789- struct ixgbe_mbx_operations ops;
32790+ ixgbe_mbx_operations_no_const ops;
32791 struct ixgbe_mbx_stats stats;
32792 u32 timeout;
32793 u32 usec_delay;
32794diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
32795index 10306b4..28df758 100644
32796--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
32797+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
32798@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
32799 s32 (*clear_vfta)(struct ixgbe_hw *);
32800 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
32801 };
32802+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
32803
32804 enum ixgbe_mac_type {
32805 ixgbe_mac_unknown = 0,
32806@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
32807 };
32808
32809 struct ixgbe_mac_info {
32810- struct ixgbe_mac_operations ops;
32811+ ixgbe_mac_operations_no_const ops;
32812 u8 addr[6];
32813 u8 perm_addr[6];
32814
32815@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
32816 s32 (*check_for_ack)(struct ixgbe_hw *);
32817 s32 (*check_for_rst)(struct ixgbe_hw *);
32818 };
32819+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
32820
32821 struct ixgbe_mbx_stats {
32822 u32 msgs_tx;
32823@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
32824 };
32825
32826 struct ixgbe_mbx_info {
32827- struct ixgbe_mbx_operations ops;
32828+ ixgbe_mbx_operations_no_const ops;
32829 struct ixgbe_mbx_stats stats;
32830 u32 timeout;
32831 u32 udelay;
32832diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
32833index 94bbc85..78c12e6 100644
32834--- a/drivers/net/ethernet/mellanox/mlx4/main.c
32835+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
32836@@ -40,6 +40,7 @@
32837 #include <linux/dma-mapping.h>
32838 #include <linux/slab.h>
32839 #include <linux/io-mapping.h>
32840+#include <linux/sched.h>
32841
32842 #include <linux/mlx4/device.h>
32843 #include <linux/mlx4/doorbell.h>
32844diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32845index 5046a64..71ca936 100644
32846--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
32847+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
32848@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
32849 void (*link_down)(struct __vxge_hw_device *devh);
32850 void (*crit_err)(struct __vxge_hw_device *devh,
32851 enum vxge_hw_event type, u64 ext_data);
32852-};
32853+} __no_const;
32854
32855 /*
32856 * struct __vxge_hw_blockpool_entry - Block private data structure
32857diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32858index 4a518a3..936b334 100644
32859--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32860+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
32861@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
32862 struct vxge_hw_mempool_dma *dma_object,
32863 u32 index,
32864 u32 is_last);
32865-};
32866+} __no_const;
32867
32868 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
32869 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
32870diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
32871index c8f47f1..5da9840 100644
32872--- a/drivers/net/ethernet/realtek/r8169.c
32873+++ b/drivers/net/ethernet/realtek/r8169.c
32874@@ -698,17 +698,17 @@ struct rtl8169_private {
32875 struct mdio_ops {
32876 void (*write)(void __iomem *, int, int);
32877 int (*read)(void __iomem *, int);
32878- } mdio_ops;
32879+ } __no_const mdio_ops;
32880
32881 struct pll_power_ops {
32882 void (*down)(struct rtl8169_private *);
32883 void (*up)(struct rtl8169_private *);
32884- } pll_power_ops;
32885+ } __no_const pll_power_ops;
32886
32887 struct jumbo_ops {
32888 void (*enable)(struct rtl8169_private *);
32889 void (*disable)(struct rtl8169_private *);
32890- } jumbo_ops;
32891+ } __no_const jumbo_ops;
32892
32893 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
32894 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
32895diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
32896index 1b4658c..a30dabb 100644
32897--- a/drivers/net/ethernet/sis/sis190.c
32898+++ b/drivers/net/ethernet/sis/sis190.c
32899@@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
32900 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
32901 struct net_device *dev)
32902 {
32903- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
32904+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
32905 struct sis190_private *tp = netdev_priv(dev);
32906 struct pci_dev *isa_bridge;
32907 u8 reg, tmp8;
32908diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
32909index edfa15d..002bfa9 100644
32910--- a/drivers/net/ppp/ppp_generic.c
32911+++ b/drivers/net/ppp/ppp_generic.c
32912@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32913 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
32914 struct ppp_stats stats;
32915 struct ppp_comp_stats cstats;
32916- char *vers;
32917
32918 switch (cmd) {
32919 case SIOCGPPPSTATS:
32920@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
32921 break;
32922
32923 case SIOCGPPPVER:
32924- vers = PPP_VERSION;
32925- if (copy_to_user(addr, vers, strlen(vers) + 1))
32926+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
32927 break;
32928 err = 0;
32929 break;
32930diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
32931index 515f122..41dd273 100644
32932--- a/drivers/net/tokenring/abyss.c
32933+++ b/drivers/net/tokenring/abyss.c
32934@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
32935
32936 static int __init abyss_init (void)
32937 {
32938- abyss_netdev_ops = tms380tr_netdev_ops;
32939+ pax_open_kernel();
32940+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32941
32942- abyss_netdev_ops.ndo_open = abyss_open;
32943- abyss_netdev_ops.ndo_stop = abyss_close;
32944+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
32945+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
32946+ pax_close_kernel();
32947
32948 return pci_register_driver(&abyss_driver);
32949 }
32950diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
32951index 6153cfd..cf69c1c 100644
32952--- a/drivers/net/tokenring/madgemc.c
32953+++ b/drivers/net/tokenring/madgemc.c
32954@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
32955
32956 static int __init madgemc_init (void)
32957 {
32958- madgemc_netdev_ops = tms380tr_netdev_ops;
32959- madgemc_netdev_ops.ndo_open = madgemc_open;
32960- madgemc_netdev_ops.ndo_stop = madgemc_close;
32961+ pax_open_kernel();
32962+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32963+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
32964+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
32965+ pax_close_kernel();
32966
32967 return mca_register_driver (&madgemc_driver);
32968 }
32969diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
32970index 8d362e6..f91cc52 100644
32971--- a/drivers/net/tokenring/proteon.c
32972+++ b/drivers/net/tokenring/proteon.c
32973@@ -353,9 +353,11 @@ static int __init proteon_init(void)
32974 struct platform_device *pdev;
32975 int i, num = 0, err = 0;
32976
32977- proteon_netdev_ops = tms380tr_netdev_ops;
32978- proteon_netdev_ops.ndo_open = proteon_open;
32979- proteon_netdev_ops.ndo_stop = tms380tr_close;
32980+ pax_open_kernel();
32981+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
32982+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
32983+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
32984+ pax_close_kernel();
32985
32986 err = platform_driver_register(&proteon_driver);
32987 if (err)
32988diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
32989index 46db5c5..37c1536 100644
32990--- a/drivers/net/tokenring/skisa.c
32991+++ b/drivers/net/tokenring/skisa.c
32992@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
32993 struct platform_device *pdev;
32994 int i, num = 0, err = 0;
32995
32996- sk_isa_netdev_ops = tms380tr_netdev_ops;
32997- sk_isa_netdev_ops.ndo_open = sk_isa_open;
32998- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
32999+ pax_open_kernel();
33000+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33001+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
33002+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
33003+ pax_close_kernel();
33004
33005 err = platform_driver_register(&sk_isa_driver);
33006 if (err)
33007diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
33008index 304fe78..db112fa 100644
33009--- a/drivers/net/usb/hso.c
33010+++ b/drivers/net/usb/hso.c
33011@@ -71,7 +71,7 @@
33012 #include <asm/byteorder.h>
33013 #include <linux/serial_core.h>
33014 #include <linux/serial.h>
33015-
33016+#include <asm/local.h>
33017
33018 #define MOD_AUTHOR "Option Wireless"
33019 #define MOD_DESCRIPTION "USB High Speed Option driver"
33020@@ -257,7 +257,7 @@ struct hso_serial {
33021
33022 /* from usb_serial_port */
33023 struct tty_struct *tty;
33024- int open_count;
33025+ local_t open_count;
33026 spinlock_t serial_lock;
33027
33028 int (*write_data) (struct hso_serial *serial);
33029@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
33030 struct urb *urb;
33031
33032 urb = serial->rx_urb[0];
33033- if (serial->open_count > 0) {
33034+ if (local_read(&serial->open_count) > 0) {
33035 count = put_rxbuf_data(urb, serial);
33036 if (count == -1)
33037 return;
33038@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
33039 DUMP1(urb->transfer_buffer, urb->actual_length);
33040
33041 /* Anyone listening? */
33042- if (serial->open_count == 0)
33043+ if (local_read(&serial->open_count) == 0)
33044 return;
33045
33046 if (status == 0) {
33047@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33048 spin_unlock_irq(&serial->serial_lock);
33049
33050 /* check for port already opened, if not set the termios */
33051- serial->open_count++;
33052- if (serial->open_count == 1) {
33053+ if (local_inc_return(&serial->open_count) == 1) {
33054 serial->rx_state = RX_IDLE;
33055 /* Force default termio settings */
33056 _hso_serial_set_termios(tty, NULL);
33057@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
33058 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33059 if (result) {
33060 hso_stop_serial_device(serial->parent);
33061- serial->open_count--;
33062+ local_dec(&serial->open_count);
33063 kref_put(&serial->parent->ref, hso_serial_ref_free);
33064 }
33065 } else {
33066@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
33067
33068 /* reset the rts and dtr */
33069 /* do the actual close */
33070- serial->open_count--;
33071+ local_dec(&serial->open_count);
33072
33073- if (serial->open_count <= 0) {
33074- serial->open_count = 0;
33075+ if (local_read(&serial->open_count) <= 0) {
33076+ local_set(&serial->open_count, 0);
33077 spin_lock_irq(&serial->serial_lock);
33078 if (serial->tty == tty) {
33079 serial->tty->driver_data = NULL;
33080@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
33081
33082 /* the actual setup */
33083 spin_lock_irqsave(&serial->serial_lock, flags);
33084- if (serial->open_count)
33085+ if (local_read(&serial->open_count))
33086 _hso_serial_set_termios(tty, old);
33087 else
33088 tty->termios = old;
33089@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
33090 D1("Pending read interrupt on port %d\n", i);
33091 spin_lock(&serial->serial_lock);
33092 if (serial->rx_state == RX_IDLE &&
33093- serial->open_count > 0) {
33094+ local_read(&serial->open_count) > 0) {
33095 /* Setup and send a ctrl req read on
33096 * port i */
33097 if (!serial->rx_urb_filled[0]) {
33098@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
33099 /* Start all serial ports */
33100 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33101 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33102- if (dev2ser(serial_table[i])->open_count) {
33103+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
33104 result =
33105 hso_start_serial_device(serial_table[i], GFP_NOIO);
33106 hso_kick_transmit(dev2ser(serial_table[i]));
33107diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33108index e662cbc..8d4a102 100644
33109--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
33110+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
33111@@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
33112 * Return with error code if any of the queue indices
33113 * is out of range
33114 */
33115- if (p->ring_index[i] < 0 ||
33116- p->ring_index[i] >= adapter->num_rx_queues)
33117+ if (p->ring_index[i] >= adapter->num_rx_queues)
33118 return -EINVAL;
33119 }
33120
33121diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
33122index 0f9ee46..e2d6e65 100644
33123--- a/drivers/net/wireless/ath/ath.h
33124+++ b/drivers/net/wireless/ath/ath.h
33125@@ -119,6 +119,7 @@ struct ath_ops {
33126 void (*write_flush) (void *);
33127 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
33128 };
33129+typedef struct ath_ops __no_const ath_ops_no_const;
33130
33131 struct ath_common;
33132 struct ath_bus_ops;
33133diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33134index b592016..fe47870 100644
33135--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33136+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
33137@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33138 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
33139 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
33140
33141- ACCESS_ONCE(ads->ds_link) = i->link;
33142- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
33143+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
33144+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
33145
33146 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
33147 ctl6 = SM(i->keytype, AR_EncrType);
33148@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33149
33150 if ((i->is_first || i->is_last) &&
33151 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
33152- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
33153+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
33154 | set11nTries(i->rates, 1)
33155 | set11nTries(i->rates, 2)
33156 | set11nTries(i->rates, 3)
33157 | (i->dur_update ? AR_DurUpdateEna : 0)
33158 | SM(0, AR_BurstDur);
33159
33160- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
33161+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
33162 | set11nRate(i->rates, 1)
33163 | set11nRate(i->rates, 2)
33164 | set11nRate(i->rates, 3);
33165 } else {
33166- ACCESS_ONCE(ads->ds_ctl2) = 0;
33167- ACCESS_ONCE(ads->ds_ctl3) = 0;
33168+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
33169+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
33170 }
33171
33172 if (!i->is_first) {
33173- ACCESS_ONCE(ads->ds_ctl0) = 0;
33174- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33175- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33176+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
33177+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33178+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33179 return;
33180 }
33181
33182@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33183 break;
33184 }
33185
33186- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33187+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
33188 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33189 | SM(i->txpower, AR_XmitPower)
33190 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33191@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33192 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
33193 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
33194
33195- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
33196- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
33197+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
33198+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
33199
33200 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
33201 return;
33202
33203- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33204+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
33205 | set11nPktDurRTSCTS(i->rates, 1);
33206
33207- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33208+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
33209 | set11nPktDurRTSCTS(i->rates, 3);
33210
33211- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33212+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
33213 | set11nRateFlags(i->rates, 1)
33214 | set11nRateFlags(i->rates, 2)
33215 | set11nRateFlags(i->rates, 3)
33216diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33217index f5ae3c6..7936af3 100644
33218--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33219+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
33220@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33221 (i->qcu << AR_TxQcuNum_S) | 0x17;
33222
33223 checksum += val;
33224- ACCESS_ONCE(ads->info) = val;
33225+ ACCESS_ONCE_RW(ads->info) = val;
33226
33227 checksum += i->link;
33228- ACCESS_ONCE(ads->link) = i->link;
33229+ ACCESS_ONCE_RW(ads->link) = i->link;
33230
33231 checksum += i->buf_addr[0];
33232- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
33233+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
33234 checksum += i->buf_addr[1];
33235- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
33236+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
33237 checksum += i->buf_addr[2];
33238- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
33239+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
33240 checksum += i->buf_addr[3];
33241- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
33242+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
33243
33244 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
33245- ACCESS_ONCE(ads->ctl3) = val;
33246+ ACCESS_ONCE_RW(ads->ctl3) = val;
33247 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
33248- ACCESS_ONCE(ads->ctl5) = val;
33249+ ACCESS_ONCE_RW(ads->ctl5) = val;
33250 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
33251- ACCESS_ONCE(ads->ctl7) = val;
33252+ ACCESS_ONCE_RW(ads->ctl7) = val;
33253 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
33254- ACCESS_ONCE(ads->ctl9) = val;
33255+ ACCESS_ONCE_RW(ads->ctl9) = val;
33256
33257 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
33258- ACCESS_ONCE(ads->ctl10) = checksum;
33259+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
33260
33261 if (i->is_first || i->is_last) {
33262- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
33263+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
33264 | set11nTries(i->rates, 1)
33265 | set11nTries(i->rates, 2)
33266 | set11nTries(i->rates, 3)
33267 | (i->dur_update ? AR_DurUpdateEna : 0)
33268 | SM(0, AR_BurstDur);
33269
33270- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
33271+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
33272 | set11nRate(i->rates, 1)
33273 | set11nRate(i->rates, 2)
33274 | set11nRate(i->rates, 3);
33275 } else {
33276- ACCESS_ONCE(ads->ctl13) = 0;
33277- ACCESS_ONCE(ads->ctl14) = 0;
33278+ ACCESS_ONCE_RW(ads->ctl13) = 0;
33279+ ACCESS_ONCE_RW(ads->ctl14) = 0;
33280 }
33281
33282 ads->ctl20 = 0;
33283@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33284
33285 ctl17 = SM(i->keytype, AR_EncrType);
33286 if (!i->is_first) {
33287- ACCESS_ONCE(ads->ctl11) = 0;
33288- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33289- ACCESS_ONCE(ads->ctl15) = 0;
33290- ACCESS_ONCE(ads->ctl16) = 0;
33291- ACCESS_ONCE(ads->ctl17) = ctl17;
33292- ACCESS_ONCE(ads->ctl18) = 0;
33293- ACCESS_ONCE(ads->ctl19) = 0;
33294+ ACCESS_ONCE_RW(ads->ctl11) = 0;
33295+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
33296+ ACCESS_ONCE_RW(ads->ctl15) = 0;
33297+ ACCESS_ONCE_RW(ads->ctl16) = 0;
33298+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33299+ ACCESS_ONCE_RW(ads->ctl18) = 0;
33300+ ACCESS_ONCE_RW(ads->ctl19) = 0;
33301 return;
33302 }
33303
33304- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33305+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
33306 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
33307 | SM(i->txpower, AR_XmitPower)
33308 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
33309@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
33310 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
33311 ctl12 |= SM(val, AR_PAPRDChainMask);
33312
33313- ACCESS_ONCE(ads->ctl12) = ctl12;
33314- ACCESS_ONCE(ads->ctl17) = ctl17;
33315+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
33316+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
33317
33318- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33319+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
33320 | set11nPktDurRTSCTS(i->rates, 1);
33321
33322- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33323+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
33324 | set11nPktDurRTSCTS(i->rates, 3);
33325
33326- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
33327+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
33328 | set11nRateFlags(i->rates, 1)
33329 | set11nRateFlags(i->rates, 2)
33330 | set11nRateFlags(i->rates, 3)
33331 | SM(i->rtscts_rate, AR_RTSCTSRate);
33332
33333- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
33334+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
33335 }
33336
33337 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
33338diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
33339index f389b3c..7359e18 100644
33340--- a/drivers/net/wireless/ath/ath9k/hw.h
33341+++ b/drivers/net/wireless/ath/ath9k/hw.h
33342@@ -605,7 +605,7 @@ struct ath_hw_private_ops {
33343
33344 /* ANI */
33345 void (*ani_cache_ini_regs)(struct ath_hw *ah);
33346-};
33347+} __no_const;
33348
33349 /**
33350 * struct ath_hw_ops - callbacks used by hardware code and driver code
33351@@ -635,7 +635,7 @@ struct ath_hw_ops {
33352 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
33353 struct ath_hw_antcomb_conf *antconf);
33354
33355-};
33356+} __no_const;
33357
33358 struct ath_nf_limits {
33359 s16 max;
33360@@ -655,7 +655,7 @@ enum ath_cal_list {
33361 #define AH_FASTCC 0x4
33362
33363 struct ath_hw {
33364- struct ath_ops reg_ops;
33365+ ath_ops_no_const reg_ops;
33366
33367 struct ieee80211_hw *hw;
33368 struct ath_common common;
33369diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33370index bea8524..c677c06 100644
33371--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33372+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
33373@@ -547,7 +547,7 @@ struct phy_func_ptr {
33374 void (*carrsuppr)(struct brcms_phy *);
33375 s32 (*rxsigpwr)(struct brcms_phy *, s32);
33376 void (*detach)(struct brcms_phy *);
33377-};
33378+} __no_const;
33379
33380 struct brcms_phy {
33381 struct brcms_phy_pub pubpi_ro;
33382diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33383index 05f2ad1..ae00eea 100644
33384--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
33385+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
33386@@ -3685,7 +3685,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
33387 */
33388 if (iwl3945_mod_params.disable_hw_scan) {
33389 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
33390- iwl3945_hw_ops.hw_scan = NULL;
33391+ pax_open_kernel();
33392+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
33393+ pax_close_kernel();
33394 }
33395
33396 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
33397diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
33398index 69a77e2..552b42c 100644
33399--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
33400+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
33401@@ -71,8 +71,8 @@ do { \
33402 } while (0)
33403
33404 #else
33405-#define IWL_DEBUG(m, level, fmt, args...)
33406-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
33407+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
33408+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
33409 #define iwl_print_hex_dump(m, level, p, len)
33410 #endif /* CONFIG_IWLWIFI_DEBUG */
33411
33412diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
33413index 523ad55..f8c5dc5 100644
33414--- a/drivers/net/wireless/mac80211_hwsim.c
33415+++ b/drivers/net/wireless/mac80211_hwsim.c
33416@@ -1678,9 +1678,11 @@ static int __init init_mac80211_hwsim(void)
33417 return -EINVAL;
33418
33419 if (fake_hw_scan) {
33420- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33421- mac80211_hwsim_ops.sw_scan_start = NULL;
33422- mac80211_hwsim_ops.sw_scan_complete = NULL;
33423+ pax_open_kernel();
33424+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
33425+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
33426+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
33427+ pax_close_kernel();
33428 }
33429
33430 spin_lock_init(&hwsim_radio_lock);
33431diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
33432index 30f138b..c904585 100644
33433--- a/drivers/net/wireless/mwifiex/main.h
33434+++ b/drivers/net/wireless/mwifiex/main.h
33435@@ -543,7 +543,7 @@ struct mwifiex_if_ops {
33436 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
33437 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
33438 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
33439-};
33440+} __no_const;
33441
33442 struct mwifiex_adapter {
33443 u8 iface_type;
33444diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
33445index 0c13840..a5c3ed6 100644
33446--- a/drivers/net/wireless/rndis_wlan.c
33447+++ b/drivers/net/wireless/rndis_wlan.c
33448@@ -1275,7 +1275,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
33449
33450 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
33451
33452- if (rts_threshold < 0 || rts_threshold > 2347)
33453+ if (rts_threshold > 2347)
33454 rts_threshold = 2347;
33455
33456 tmp = cpu_to_le32(rts_threshold);
33457diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
33458index a77f1bb..c608b2b 100644
33459--- a/drivers/net/wireless/wl1251/wl1251.h
33460+++ b/drivers/net/wireless/wl1251/wl1251.h
33461@@ -266,7 +266,7 @@ struct wl1251_if_operations {
33462 void (*reset)(struct wl1251 *wl);
33463 void (*enable_irq)(struct wl1251 *wl);
33464 void (*disable_irq)(struct wl1251 *wl);
33465-};
33466+} __no_const;
33467
33468 struct wl1251 {
33469 struct ieee80211_hw *hw;
33470diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
33471index f34b5b2..b5abb9f 100644
33472--- a/drivers/oprofile/buffer_sync.c
33473+++ b/drivers/oprofile/buffer_sync.c
33474@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
33475 if (cookie == NO_COOKIE)
33476 offset = pc;
33477 if (cookie == INVALID_COOKIE) {
33478- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33479+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33480 offset = pc;
33481 }
33482 if (cookie != last_cookie) {
33483@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
33484 /* add userspace sample */
33485
33486 if (!mm) {
33487- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33488+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33489 return 0;
33490 }
33491
33492 cookie = lookup_dcookie(mm, s->eip, &offset);
33493
33494 if (cookie == INVALID_COOKIE) {
33495- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33496+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33497 return 0;
33498 }
33499
33500@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
33501 /* ignore backtraces if failed to add a sample */
33502 if (state == sb_bt_start) {
33503 state = sb_bt_ignore;
33504- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33505+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33506 }
33507 }
33508 release_mm(mm);
33509diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
33510index c0cc4e7..44d4e54 100644
33511--- a/drivers/oprofile/event_buffer.c
33512+++ b/drivers/oprofile/event_buffer.c
33513@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
33514 }
33515
33516 if (buffer_pos == buffer_size) {
33517- atomic_inc(&oprofile_stats.event_lost_overflow);
33518+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33519 return;
33520 }
33521
33522diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
33523index f8c752e..28bf4fc 100644
33524--- a/drivers/oprofile/oprof.c
33525+++ b/drivers/oprofile/oprof.c
33526@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
33527 if (oprofile_ops.switch_events())
33528 return;
33529
33530- atomic_inc(&oprofile_stats.multiplex_counter);
33531+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33532 start_switch_worker();
33533 }
33534
33535diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
33536index 917d28e..d62d981 100644
33537--- a/drivers/oprofile/oprofile_stats.c
33538+++ b/drivers/oprofile/oprofile_stats.c
33539@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33540 cpu_buf->sample_invalid_eip = 0;
33541 }
33542
33543- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33544- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33545- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33546- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33547- atomic_set(&oprofile_stats.multiplex_counter, 0);
33548+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33549+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33550+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33551+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33552+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33553 }
33554
33555
33556diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
33557index 38b6fc0..b5cbfce 100644
33558--- a/drivers/oprofile/oprofile_stats.h
33559+++ b/drivers/oprofile/oprofile_stats.h
33560@@ -13,11 +13,11 @@
33561 #include <linux/atomic.h>
33562
33563 struct oprofile_stat_struct {
33564- atomic_t sample_lost_no_mm;
33565- atomic_t sample_lost_no_mapping;
33566- atomic_t bt_lost_no_mapping;
33567- atomic_t event_lost_overflow;
33568- atomic_t multiplex_counter;
33569+ atomic_unchecked_t sample_lost_no_mm;
33570+ atomic_unchecked_t sample_lost_no_mapping;
33571+ atomic_unchecked_t bt_lost_no_mapping;
33572+ atomic_unchecked_t event_lost_overflow;
33573+ atomic_unchecked_t multiplex_counter;
33574 };
33575
33576 extern struct oprofile_stat_struct oprofile_stats;
33577diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
33578index 2f0aa0f..90fab02 100644
33579--- a/drivers/oprofile/oprofilefs.c
33580+++ b/drivers/oprofile/oprofilefs.c
33581@@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
33582
33583
33584 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33585- char const *name, atomic_t *val)
33586+ char const *name, atomic_unchecked_t *val)
33587 {
33588 return __oprofilefs_create_file(sb, root, name,
33589 &atomic_ro_fops, 0444, val);
33590diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
33591index 3f56bc0..707d642 100644
33592--- a/drivers/parport/procfs.c
33593+++ b/drivers/parport/procfs.c
33594@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
33595
33596 *ppos += len;
33597
33598- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33599+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33600 }
33601
33602 #ifdef CONFIG_PARPORT_1284
33603@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
33604
33605 *ppos += len;
33606
33607- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33608+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33609 }
33610 #endif /* IEEE1284.3 support. */
33611
33612diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
33613index 9fff878..ad0ad53 100644
33614--- a/drivers/pci/hotplug/cpci_hotplug.h
33615+++ b/drivers/pci/hotplug/cpci_hotplug.h
33616@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
33617 int (*hardware_test) (struct slot* slot, u32 value);
33618 u8 (*get_power) (struct slot* slot);
33619 int (*set_power) (struct slot* slot, int value);
33620-};
33621+} __no_const;
33622
33623 struct cpci_hp_controller {
33624 unsigned int irq;
33625diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
33626index 76ba8a1..20ca857 100644
33627--- a/drivers/pci/hotplug/cpqphp_nvram.c
33628+++ b/drivers/pci/hotplug/cpqphp_nvram.c
33629@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
33630
33631 void compaq_nvram_init (void __iomem *rom_start)
33632 {
33633+
33634+#ifndef CONFIG_PAX_KERNEXEC
33635 if (rom_start) {
33636 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33637 }
33638+#endif
33639+
33640 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33641
33642 /* initialize our int15 lock */
33643diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
33644index cbfbab1..6a9fced 100644
33645--- a/drivers/pci/pcie/aspm.c
33646+++ b/drivers/pci/pcie/aspm.c
33647@@ -27,9 +27,9 @@
33648 #define MODULE_PARAM_PREFIX "pcie_aspm."
33649
33650 /* Note: those are not register definitions */
33651-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33652-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33653-#define ASPM_STATE_L1 (4) /* L1 state */
33654+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33655+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33656+#define ASPM_STATE_L1 (4U) /* L1 state */
33657 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33658 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33659
33660diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
33661index 04e74f4..a960176 100644
33662--- a/drivers/pci/probe.c
33663+++ b/drivers/pci/probe.c
33664@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
33665 u32 l, sz, mask;
33666 u16 orig_cmd;
33667
33668- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
33669+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
33670
33671 if (!dev->mmio_always_on) {
33672 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
33673diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
33674index 27911b5..5b6db88 100644
33675--- a/drivers/pci/proc.c
33676+++ b/drivers/pci/proc.c
33677@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
33678 static int __init pci_proc_init(void)
33679 {
33680 struct pci_dev *dev = NULL;
33681+
33682+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33683+#ifdef CONFIG_GRKERNSEC_PROC_USER
33684+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33685+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33686+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33687+#endif
33688+#else
33689 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33690+#endif
33691 proc_create("devices", 0, proc_bus_pci_dir,
33692 &proc_bus_pci_dev_operations);
33693 proc_initialized = 1;
33694diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
33695index 7b82868..b9344c9 100644
33696--- a/drivers/platform/x86/thinkpad_acpi.c
33697+++ b/drivers/platform/x86/thinkpad_acpi.c
33698@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
33699 return 0;
33700 }
33701
33702-void static hotkey_mask_warn_incomplete_mask(void)
33703+static void hotkey_mask_warn_incomplete_mask(void)
33704 {
33705 /* log only what the user can fix... */
33706 const u32 wantedmask = hotkey_driver_mask &
33707@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
33708 }
33709 }
33710
33711-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33712- struct tp_nvram_state *newn,
33713- const u32 event_mask)
33714-{
33715-
33716 #define TPACPI_COMPARE_KEY(__scancode, __member) \
33717 do { \
33718 if ((event_mask & (1 << __scancode)) && \
33719@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33720 tpacpi_hotkey_send_key(__scancode); \
33721 } while (0)
33722
33723- void issue_volchange(const unsigned int oldvol,
33724- const unsigned int newvol)
33725- {
33726- unsigned int i = oldvol;
33727+static void issue_volchange(const unsigned int oldvol,
33728+ const unsigned int newvol,
33729+ const u32 event_mask)
33730+{
33731+ unsigned int i = oldvol;
33732
33733- while (i > newvol) {
33734- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33735- i--;
33736- }
33737- while (i < newvol) {
33738- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33739- i++;
33740- }
33741+ while (i > newvol) {
33742+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
33743+ i--;
33744 }
33745+ while (i < newvol) {
33746+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33747+ i++;
33748+ }
33749+}
33750
33751- void issue_brightnesschange(const unsigned int oldbrt,
33752- const unsigned int newbrt)
33753- {
33754- unsigned int i = oldbrt;
33755+static void issue_brightnesschange(const unsigned int oldbrt,
33756+ const unsigned int newbrt,
33757+ const u32 event_mask)
33758+{
33759+ unsigned int i = oldbrt;
33760
33761- while (i > newbrt) {
33762- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33763- i--;
33764- }
33765- while (i < newbrt) {
33766- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33767- i++;
33768- }
33769+ while (i > newbrt) {
33770+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
33771+ i--;
33772+ }
33773+ while (i < newbrt) {
33774+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33775+ i++;
33776 }
33777+}
33778
33779+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33780+ struct tp_nvram_state *newn,
33781+ const u32 event_mask)
33782+{
33783 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
33784 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
33785 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
33786@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33787 oldn->volume_level != newn->volume_level) {
33788 /* recently muted, or repeated mute keypress, or
33789 * multiple presses ending in mute */
33790- issue_volchange(oldn->volume_level, newn->volume_level);
33791+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33792 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
33793 }
33794 } else {
33795@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33796 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
33797 }
33798 if (oldn->volume_level != newn->volume_level) {
33799- issue_volchange(oldn->volume_level, newn->volume_level);
33800+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
33801 } else if (oldn->volume_toggle != newn->volume_toggle) {
33802 /* repeated vol up/down keypress at end of scale ? */
33803 if (newn->volume_level == 0)
33804@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33805 /* handle brightness */
33806 if (oldn->brightness_level != newn->brightness_level) {
33807 issue_brightnesschange(oldn->brightness_level,
33808- newn->brightness_level);
33809+ newn->brightness_level,
33810+ event_mask);
33811 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
33812 /* repeated key presses that didn't change state */
33813 if (newn->brightness_level == 0)
33814@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
33815 && !tp_features.bright_unkfw)
33816 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
33817 }
33818+}
33819
33820 #undef TPACPI_COMPARE_KEY
33821 #undef TPACPI_MAY_SEND_KEY
33822-}
33823
33824 /*
33825 * Polling driver
33826diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
33827index b859d16..5cc6b1a 100644
33828--- a/drivers/pnp/pnpbios/bioscalls.c
33829+++ b/drivers/pnp/pnpbios/bioscalls.c
33830@@ -59,7 +59,7 @@ do { \
33831 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33832 } while(0)
33833
33834-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33835+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33836 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33837
33838 /*
33839@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33840
33841 cpu = get_cpu();
33842 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33843+
33844+ pax_open_kernel();
33845 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33846+ pax_close_kernel();
33847
33848 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33849 spin_lock_irqsave(&pnp_bios_lock, flags);
33850@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
33851 :"memory");
33852 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33853
33854+ pax_open_kernel();
33855 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33856+ pax_close_kernel();
33857+
33858 put_cpu();
33859
33860 /* If we get here and this is set then the PnP BIOS faulted on us. */
33861@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
33862 return status;
33863 }
33864
33865-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33866+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33867 {
33868 int i;
33869
33870@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33871 pnp_bios_callpoint.offset = header->fields.pm16offset;
33872 pnp_bios_callpoint.segment = PNP_CS16;
33873
33874+ pax_open_kernel();
33875+
33876 for_each_possible_cpu(i) {
33877 struct desc_struct *gdt = get_cpu_gdt_table(i);
33878 if (!gdt)
33879@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
33880 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33881 (unsigned long)__va(header->fields.pm16dseg));
33882 }
33883+
33884+ pax_close_kernel();
33885 }
33886diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
33887index b0ecacb..7c9da2e 100644
33888--- a/drivers/pnp/resource.c
33889+++ b/drivers/pnp/resource.c
33890@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
33891 return 1;
33892
33893 /* check if the resource is valid */
33894- if (*irq < 0 || *irq > 15)
33895+ if (*irq > 15)
33896 return 0;
33897
33898 /* check if the resource is reserved */
33899@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
33900 return 1;
33901
33902 /* check if the resource is valid */
33903- if (*dma < 0 || *dma == 4 || *dma > 7)
33904+ if (*dma == 4 || *dma > 7)
33905 return 0;
33906
33907 /* check if the resource is reserved */
33908diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
33909index bb16f5b..c751eef 100644
33910--- a/drivers/power/bq27x00_battery.c
33911+++ b/drivers/power/bq27x00_battery.c
33912@@ -67,7 +67,7 @@
33913 struct bq27x00_device_info;
33914 struct bq27x00_access_methods {
33915 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
33916-};
33917+} __no_const;
33918
33919 enum bq27x00_chip { BQ27000, BQ27500 };
33920
33921diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
33922index 33f5d9a..d957d3f 100644
33923--- a/drivers/regulator/max8660.c
33924+++ b/drivers/regulator/max8660.c
33925@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
33926 max8660->shadow_regs[MAX8660_OVER1] = 5;
33927 } else {
33928 /* Otherwise devices can be toggled via software */
33929- max8660_dcdc_ops.enable = max8660_dcdc_enable;
33930- max8660_dcdc_ops.disable = max8660_dcdc_disable;
33931+ pax_open_kernel();
33932+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
33933+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
33934+ pax_close_kernel();
33935 }
33936
33937 /*
33938diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
33939index 023d17d..74ef35b 100644
33940--- a/drivers/regulator/mc13892-regulator.c
33941+++ b/drivers/regulator/mc13892-regulator.c
33942@@ -565,10 +565,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
33943 }
33944 mc13xxx_unlock(mc13892);
33945
33946- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33947+ pax_open_kernel();
33948+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
33949 = mc13892_vcam_set_mode;
33950- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33951+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
33952 = mc13892_vcam_get_mode;
33953+ pax_close_kernel();
33954 for (i = 0; i < pdata->num_regulators; i++) {
33955 init_data = &pdata->regulators[i];
33956 priv->regulators[i] = regulator_register(
33957diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
33958index cace6d3..f623fda 100644
33959--- a/drivers/rtc/rtc-dev.c
33960+++ b/drivers/rtc/rtc-dev.c
33961@@ -14,6 +14,7 @@
33962 #include <linux/module.h>
33963 #include <linux/rtc.h>
33964 #include <linux/sched.h>
33965+#include <linux/grsecurity.h>
33966 #include "rtc-core.h"
33967
33968 static dev_t rtc_devt;
33969@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
33970 if (copy_from_user(&tm, uarg, sizeof(tm)))
33971 return -EFAULT;
33972
33973+ gr_log_timechange();
33974+
33975 return rtc_set_time(rtc, &tm);
33976
33977 case RTC_PIE_ON:
33978diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
33979index ffb5878..e6d785c 100644
33980--- a/drivers/scsi/aacraid/aacraid.h
33981+++ b/drivers/scsi/aacraid/aacraid.h
33982@@ -492,7 +492,7 @@ struct adapter_ops
33983 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
33984 /* Administrative operations */
33985 int (*adapter_comm)(struct aac_dev * dev, int comm);
33986-};
33987+} __no_const;
33988
33989 /*
33990 * Define which interrupt handler needs to be installed
33991diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
33992index 705e13e..91c873c 100644
33993--- a/drivers/scsi/aacraid/linit.c
33994+++ b/drivers/scsi/aacraid/linit.c
33995@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
33996 #elif defined(__devinitconst)
33997 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
33998 #else
33999-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
34000+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
34001 #endif
34002 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
34003 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
34004diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
34005index d5ff142..49c0ebb 100644
34006--- a/drivers/scsi/aic94xx/aic94xx_init.c
34007+++ b/drivers/scsi/aic94xx/aic94xx_init.c
34008@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
34009 .lldd_control_phy = asd_control_phy,
34010 };
34011
34012-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
34013+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
34014 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
34015 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
34016 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
34017diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
34018index a796de9..1ef20e1 100644
34019--- a/drivers/scsi/bfa/bfa.h
34020+++ b/drivers/scsi/bfa/bfa.h
34021@@ -196,7 +196,7 @@ struct bfa_hwif_s {
34022 u32 *end);
34023 int cpe_vec_q0;
34024 int rme_vec_q0;
34025-};
34026+} __no_const;
34027 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
34028
34029 struct bfa_faa_cbfn_s {
34030diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
34031index e07bd47..cd1bbbb 100644
34032--- a/drivers/scsi/bfa/bfa_fcpim.c
34033+++ b/drivers/scsi/bfa/bfa_fcpim.c
34034@@ -4121,7 +4121,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
34035
34036 bfa_iotag_attach(fcp);
34037
34038- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
34039+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
34040 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
34041 (fcp->num_itns * sizeof(struct bfa_itn_s));
34042 memset(fcp->itn_arr, 0,
34043@@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34044 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
34045 {
34046 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
34047- struct bfa_itn_s *itn;
34048+ bfa_itn_s_no_const *itn;
34049
34050 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
34051 itn->isr = isr;
34052diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
34053index 1080bcb..a3b39e3 100644
34054--- a/drivers/scsi/bfa/bfa_fcpim.h
34055+++ b/drivers/scsi/bfa/bfa_fcpim.h
34056@@ -37,6 +37,7 @@ struct bfa_iotag_s {
34057 struct bfa_itn_s {
34058 bfa_isr_func_t isr;
34059 };
34060+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
34061
34062 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
34063 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
34064@@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
34065 struct list_head iotag_tio_free_q; /* free IO resources */
34066 struct list_head iotag_unused_q; /* unused IO resources*/
34067 struct bfa_iotag_s *iotag_arr;
34068- struct bfa_itn_s *itn_arr;
34069+ bfa_itn_s_no_const *itn_arr;
34070 int num_ioim_reqs;
34071 int num_fwtio_reqs;
34072 int num_itns;
34073diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
34074index 546d46b..642fa5b 100644
34075--- a/drivers/scsi/bfa/bfa_ioc.h
34076+++ b/drivers/scsi/bfa/bfa_ioc.h
34077@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
34078 bfa_ioc_disable_cbfn_t disable_cbfn;
34079 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
34080 bfa_ioc_reset_cbfn_t reset_cbfn;
34081-};
34082+} __no_const;
34083
34084 /*
34085 * IOC event notification mechanism.
34086@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
34087 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
34088 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
34089 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
34090-};
34091+} __no_const;
34092
34093 /*
34094 * Queue element to wait for room in request queue. FIFO order is
34095diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
34096index 351dc0b..951dc32 100644
34097--- a/drivers/scsi/hosts.c
34098+++ b/drivers/scsi/hosts.c
34099@@ -42,7 +42,7 @@
34100 #include "scsi_logging.h"
34101
34102
34103-static atomic_t scsi_host_next_hn; /* host_no for next new host */
34104+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34105
34106
34107 static void scsi_host_cls_release(struct device *dev)
34108@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
34109 * subtract one because we increment first then return, but we need to
34110 * know what the next host number was before increment
34111 */
34112- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34113+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34114 shost->dma_channel = 0xff;
34115
34116 /* These three are default values which can be overridden */
34117diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
34118index 865d452..e9b7fa7 100644
34119--- a/drivers/scsi/hpsa.c
34120+++ b/drivers/scsi/hpsa.c
34121@@ -505,7 +505,7 @@ static inline u32 next_command(struct ctlr_info *h)
34122 u32 a;
34123
34124 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
34125- return h->access.command_completed(h);
34126+ return h->access->command_completed(h);
34127
34128 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
34129 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
34130@@ -2989,7 +2989,7 @@ static void start_io(struct ctlr_info *h)
34131 while (!list_empty(&h->reqQ)) {
34132 c = list_entry(h->reqQ.next, struct CommandList, list);
34133 /* can't do anything if fifo is full */
34134- if ((h->access.fifo_full(h))) {
34135+ if ((h->access->fifo_full(h))) {
34136 dev_warn(&h->pdev->dev, "fifo full\n");
34137 break;
34138 }
34139@@ -2999,7 +2999,7 @@ static void start_io(struct ctlr_info *h)
34140 h->Qdepth--;
34141
34142 /* Tell the controller execute command */
34143- h->access.submit_command(h, c);
34144+ h->access->submit_command(h, c);
34145
34146 /* Put job onto the completed Q */
34147 addQ(&h->cmpQ, c);
34148@@ -3008,17 +3008,17 @@ static void start_io(struct ctlr_info *h)
34149
34150 static inline unsigned long get_next_completion(struct ctlr_info *h)
34151 {
34152- return h->access.command_completed(h);
34153+ return h->access->command_completed(h);
34154 }
34155
34156 static inline bool interrupt_pending(struct ctlr_info *h)
34157 {
34158- return h->access.intr_pending(h);
34159+ return h->access->intr_pending(h);
34160 }
34161
34162 static inline long interrupt_not_for_us(struct ctlr_info *h)
34163 {
34164- return (h->access.intr_pending(h) == 0) ||
34165+ return (h->access->intr_pending(h) == 0) ||
34166 (h->interrupts_enabled == 0);
34167 }
34168
34169@@ -3917,7 +3917,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
34170 if (prod_index < 0)
34171 return -ENODEV;
34172 h->product_name = products[prod_index].product_name;
34173- h->access = *(products[prod_index].access);
34174+ h->access = products[prod_index].access;
34175
34176 if (hpsa_board_disabled(h->pdev)) {
34177 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
34178@@ -4162,7 +4162,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
34179
34180 assert_spin_locked(&lockup_detector_lock);
34181 remove_ctlr_from_lockup_detector_list(h);
34182- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34183+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34184 spin_lock_irqsave(&h->lock, flags);
34185 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
34186 spin_unlock_irqrestore(&h->lock, flags);
34187@@ -4340,7 +4340,7 @@ reinit_after_soft_reset:
34188 }
34189
34190 /* make sure the board interrupts are off */
34191- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34192+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34193
34194 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
34195 goto clean2;
34196@@ -4374,7 +4374,7 @@ reinit_after_soft_reset:
34197 * fake ones to scoop up any residual completions.
34198 */
34199 spin_lock_irqsave(&h->lock, flags);
34200- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34201+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34202 spin_unlock_irqrestore(&h->lock, flags);
34203 free_irq(h->intr[h->intr_mode], h);
34204 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
34205@@ -4393,9 +4393,9 @@ reinit_after_soft_reset:
34206 dev_info(&h->pdev->dev, "Board READY.\n");
34207 dev_info(&h->pdev->dev,
34208 "Waiting for stale completions to drain.\n");
34209- h->access.set_intr_mask(h, HPSA_INTR_ON);
34210+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34211 msleep(10000);
34212- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34213+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34214
34215 rc = controller_reset_failed(h->cfgtable);
34216 if (rc)
34217@@ -4416,7 +4416,7 @@ reinit_after_soft_reset:
34218 }
34219
34220 /* Turn the interrupts on so we can service requests */
34221- h->access.set_intr_mask(h, HPSA_INTR_ON);
34222+ h->access->set_intr_mask(h, HPSA_INTR_ON);
34223
34224 hpsa_hba_inquiry(h);
34225 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
34226@@ -4468,7 +4468,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
34227 * To write all data in the battery backed cache to disks
34228 */
34229 hpsa_flush_cache(h);
34230- h->access.set_intr_mask(h, HPSA_INTR_OFF);
34231+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
34232 free_irq(h->intr[h->intr_mode], h);
34233 #ifdef CONFIG_PCI_MSI
34234 if (h->msix_vector)
34235@@ -4632,7 +4632,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
34236 return;
34237 }
34238 /* Change the access methods to the performant access methods */
34239- h->access = SA5_performant_access;
34240+ h->access = &SA5_performant_access;
34241 h->transMethod = CFGTBL_Trans_Performant;
34242 }
34243
34244diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
34245index 91edafb..a9b88ec 100644
34246--- a/drivers/scsi/hpsa.h
34247+++ b/drivers/scsi/hpsa.h
34248@@ -73,7 +73,7 @@ struct ctlr_info {
34249 unsigned int msix_vector;
34250 unsigned int msi_vector;
34251 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
34252- struct access_method access;
34253+ struct access_method *access;
34254
34255 /* queue and queue Info */
34256 struct list_head reqQ;
34257diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
34258index f2df059..a3a9930 100644
34259--- a/drivers/scsi/ips.h
34260+++ b/drivers/scsi/ips.h
34261@@ -1027,7 +1027,7 @@ typedef struct {
34262 int (*intr)(struct ips_ha *);
34263 void (*enableint)(struct ips_ha *);
34264 uint32_t (*statupd)(struct ips_ha *);
34265-} ips_hw_func_t;
34266+} __no_const ips_hw_func_t;
34267
34268 typedef struct ips_ha {
34269 uint8_t ha_id[IPS_MAX_CHANNELS+1];
34270diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
34271index 9de9db2..1e09660 100644
34272--- a/drivers/scsi/libfc/fc_exch.c
34273+++ b/drivers/scsi/libfc/fc_exch.c
34274@@ -105,12 +105,12 @@ struct fc_exch_mgr {
34275 * all together if not used XXX
34276 */
34277 struct {
34278- atomic_t no_free_exch;
34279- atomic_t no_free_exch_xid;
34280- atomic_t xid_not_found;
34281- atomic_t xid_busy;
34282- atomic_t seq_not_found;
34283- atomic_t non_bls_resp;
34284+ atomic_unchecked_t no_free_exch;
34285+ atomic_unchecked_t no_free_exch_xid;
34286+ atomic_unchecked_t xid_not_found;
34287+ atomic_unchecked_t xid_busy;
34288+ atomic_unchecked_t seq_not_found;
34289+ atomic_unchecked_t non_bls_resp;
34290 } stats;
34291 };
34292
34293@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
34294 /* allocate memory for exchange */
34295 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34296 if (!ep) {
34297- atomic_inc(&mp->stats.no_free_exch);
34298+ atomic_inc_unchecked(&mp->stats.no_free_exch);
34299 goto out;
34300 }
34301 memset(ep, 0, sizeof(*ep));
34302@@ -780,7 +780,7 @@ out:
34303 return ep;
34304 err:
34305 spin_unlock_bh(&pool->lock);
34306- atomic_inc(&mp->stats.no_free_exch_xid);
34307+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34308 mempool_free(ep, mp->ep_pool);
34309 return NULL;
34310 }
34311@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34312 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34313 ep = fc_exch_find(mp, xid);
34314 if (!ep) {
34315- atomic_inc(&mp->stats.xid_not_found);
34316+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34317 reject = FC_RJT_OX_ID;
34318 goto out;
34319 }
34320@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34321 ep = fc_exch_find(mp, xid);
34322 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34323 if (ep) {
34324- atomic_inc(&mp->stats.xid_busy);
34325+ atomic_inc_unchecked(&mp->stats.xid_busy);
34326 reject = FC_RJT_RX_ID;
34327 goto rel;
34328 }
34329@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34330 }
34331 xid = ep->xid; /* get our XID */
34332 } else if (!ep) {
34333- atomic_inc(&mp->stats.xid_not_found);
34334+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34335 reject = FC_RJT_RX_ID; /* XID not found */
34336 goto out;
34337 }
34338@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
34339 } else {
34340 sp = &ep->seq;
34341 if (sp->id != fh->fh_seq_id) {
34342- atomic_inc(&mp->stats.seq_not_found);
34343+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34344 if (f_ctl & FC_FC_END_SEQ) {
34345 /*
34346 * Update sequence_id based on incoming last
34347@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34348
34349 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34350 if (!ep) {
34351- atomic_inc(&mp->stats.xid_not_found);
34352+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34353 goto out;
34354 }
34355 if (ep->esb_stat & ESB_ST_COMPLETE) {
34356- atomic_inc(&mp->stats.xid_not_found);
34357+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34358 goto rel;
34359 }
34360 if (ep->rxid == FC_XID_UNKNOWN)
34361 ep->rxid = ntohs(fh->fh_rx_id);
34362 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34363- atomic_inc(&mp->stats.xid_not_found);
34364+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34365 goto rel;
34366 }
34367 if (ep->did != ntoh24(fh->fh_s_id) &&
34368 ep->did != FC_FID_FLOGI) {
34369- atomic_inc(&mp->stats.xid_not_found);
34370+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34371 goto rel;
34372 }
34373 sof = fr_sof(fp);
34374@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34375 sp->ssb_stat |= SSB_ST_RESP;
34376 sp->id = fh->fh_seq_id;
34377 } else if (sp->id != fh->fh_seq_id) {
34378- atomic_inc(&mp->stats.seq_not_found);
34379+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34380 goto rel;
34381 }
34382
34383@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
34384 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34385
34386 if (!sp)
34387- atomic_inc(&mp->stats.xid_not_found);
34388+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34389 else
34390- atomic_inc(&mp->stats.non_bls_resp);
34391+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34392
34393 fc_frame_free(fp);
34394 }
34395diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
34396index db9238f..4378ed2 100644
34397--- a/drivers/scsi/libsas/sas_ata.c
34398+++ b/drivers/scsi/libsas/sas_ata.c
34399@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
34400 .postreset = ata_std_postreset,
34401 .error_handler = ata_std_error_handler,
34402 .post_internal_cmd = sas_ata_post_internal,
34403- .qc_defer = ata_std_qc_defer,
34404+ .qc_defer = ata_std_qc_defer,
34405 .qc_prep = ata_noop_qc_prep,
34406 .qc_issue = sas_ata_qc_issue,
34407 .qc_fill_rtf = sas_ata_qc_fill_rtf,
34408diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
34409index bb4c8e0..f33d849 100644
34410--- a/drivers/scsi/lpfc/lpfc.h
34411+++ b/drivers/scsi/lpfc/lpfc.h
34412@@ -425,7 +425,7 @@ struct lpfc_vport {
34413 struct dentry *debug_nodelist;
34414 struct dentry *vport_debugfs_root;
34415 struct lpfc_debugfs_trc *disc_trc;
34416- atomic_t disc_trc_cnt;
34417+ atomic_unchecked_t disc_trc_cnt;
34418 #endif
34419 uint8_t stat_data_enabled;
34420 uint8_t stat_data_blocked;
34421@@ -835,8 +835,8 @@ struct lpfc_hba {
34422 struct timer_list fabric_block_timer;
34423 unsigned long bit_flags;
34424 #define FABRIC_COMANDS_BLOCKED 0
34425- atomic_t num_rsrc_err;
34426- atomic_t num_cmd_success;
34427+ atomic_unchecked_t num_rsrc_err;
34428+ atomic_unchecked_t num_cmd_success;
34429 unsigned long last_rsrc_error_time;
34430 unsigned long last_ramp_down_time;
34431 unsigned long last_ramp_up_time;
34432@@ -866,7 +866,7 @@ struct lpfc_hba {
34433
34434 struct dentry *debug_slow_ring_trc;
34435 struct lpfc_debugfs_trc *slow_ring_trc;
34436- atomic_t slow_ring_trc_cnt;
34437+ atomic_unchecked_t slow_ring_trc_cnt;
34438 /* iDiag debugfs sub-directory */
34439 struct dentry *idiag_root;
34440 struct dentry *idiag_pci_cfg;
34441diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
34442index 2838259..a07cfb5 100644
34443--- a/drivers/scsi/lpfc/lpfc_debugfs.c
34444+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
34445@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
34446
34447 #include <linux/debugfs.h>
34448
34449-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34450+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34451 static unsigned long lpfc_debugfs_start_time = 0L;
34452
34453 /* iDiag */
34454@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
34455 lpfc_debugfs_enable = 0;
34456
34457 len = 0;
34458- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34459+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34460 (lpfc_debugfs_max_disc_trc - 1);
34461 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34462 dtp = vport->disc_trc + i;
34463@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
34464 lpfc_debugfs_enable = 0;
34465
34466 len = 0;
34467- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34468+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34469 (lpfc_debugfs_max_slow_ring_trc - 1);
34470 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34471 dtp = phba->slow_ring_trc + i;
34472@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
34473 !vport || !vport->disc_trc)
34474 return;
34475
34476- index = atomic_inc_return(&vport->disc_trc_cnt) &
34477+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34478 (lpfc_debugfs_max_disc_trc - 1);
34479 dtp = vport->disc_trc + index;
34480 dtp->fmt = fmt;
34481 dtp->data1 = data1;
34482 dtp->data2 = data2;
34483 dtp->data3 = data3;
34484- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34485+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34486 dtp->jif = jiffies;
34487 #endif
34488 return;
34489@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
34490 !phba || !phba->slow_ring_trc)
34491 return;
34492
34493- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34494+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34495 (lpfc_debugfs_max_slow_ring_trc - 1);
34496 dtp = phba->slow_ring_trc + index;
34497 dtp->fmt = fmt;
34498 dtp->data1 = data1;
34499 dtp->data2 = data2;
34500 dtp->data3 = data3;
34501- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34502+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34503 dtp->jif = jiffies;
34504 #endif
34505 return;
34506@@ -3986,7 +3986,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34507 "slow_ring buffer\n");
34508 goto debug_failed;
34509 }
34510- atomic_set(&phba->slow_ring_trc_cnt, 0);
34511+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34512 memset(phba->slow_ring_trc, 0,
34513 (sizeof(struct lpfc_debugfs_trc) *
34514 lpfc_debugfs_max_slow_ring_trc));
34515@@ -4032,7 +4032,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
34516 "buffer\n");
34517 goto debug_failed;
34518 }
34519- atomic_set(&vport->disc_trc_cnt, 0);
34520+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34521
34522 snprintf(name, sizeof(name), "discovery_trace");
34523 vport->debug_disc_trc =
34524diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
34525index 55bc4fc..a2a109c 100644
34526--- a/drivers/scsi/lpfc/lpfc_init.c
34527+++ b/drivers/scsi/lpfc/lpfc_init.c
34528@@ -10027,8 +10027,10 @@ lpfc_init(void)
34529 printk(LPFC_COPYRIGHT "\n");
34530
34531 if (lpfc_enable_npiv) {
34532- lpfc_transport_functions.vport_create = lpfc_vport_create;
34533- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34534+ pax_open_kernel();
34535+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
34536+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
34537+ pax_close_kernel();
34538 }
34539 lpfc_transport_template =
34540 fc_attach_transport(&lpfc_transport_functions);
34541diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
34542index 2e1e54e..1af0a0d 100644
34543--- a/drivers/scsi/lpfc/lpfc_scsi.c
34544+++ b/drivers/scsi/lpfc/lpfc_scsi.c
34545@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
34546 uint32_t evt_posted;
34547
34548 spin_lock_irqsave(&phba->hbalock, flags);
34549- atomic_inc(&phba->num_rsrc_err);
34550+ atomic_inc_unchecked(&phba->num_rsrc_err);
34551 phba->last_rsrc_error_time = jiffies;
34552
34553 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34554@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
34555 unsigned long flags;
34556 struct lpfc_hba *phba = vport->phba;
34557 uint32_t evt_posted;
34558- atomic_inc(&phba->num_cmd_success);
34559+ atomic_inc_unchecked(&phba->num_cmd_success);
34560
34561 if (vport->cfg_lun_queue_depth <= queue_depth)
34562 return;
34563@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34564 unsigned long num_rsrc_err, num_cmd_success;
34565 int i;
34566
34567- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34568- num_cmd_success = atomic_read(&phba->num_cmd_success);
34569+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34570+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34571
34572 vports = lpfc_create_vport_work_array(phba);
34573 if (vports != NULL)
34574@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
34575 }
34576 }
34577 lpfc_destroy_vport_work_array(phba, vports);
34578- atomic_set(&phba->num_rsrc_err, 0);
34579- atomic_set(&phba->num_cmd_success, 0);
34580+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34581+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34582 }
34583
34584 /**
34585@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
34586 }
34587 }
34588 lpfc_destroy_vport_work_array(phba, vports);
34589- atomic_set(&phba->num_rsrc_err, 0);
34590- atomic_set(&phba->num_cmd_success, 0);
34591+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34592+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34593 }
34594
34595 /**
34596diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
34597index 5163edb..7b142bc 100644
34598--- a/drivers/scsi/pmcraid.c
34599+++ b/drivers/scsi/pmcraid.c
34600@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
34601 res->scsi_dev = scsi_dev;
34602 scsi_dev->hostdata = res;
34603 res->change_detected = 0;
34604- atomic_set(&res->read_failures, 0);
34605- atomic_set(&res->write_failures, 0);
34606+ atomic_set_unchecked(&res->read_failures, 0);
34607+ atomic_set_unchecked(&res->write_failures, 0);
34608 rc = 0;
34609 }
34610 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34611@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
34612
34613 /* If this was a SCSI read/write command keep count of errors */
34614 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34615- atomic_inc(&res->read_failures);
34616+ atomic_inc_unchecked(&res->read_failures);
34617 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34618- atomic_inc(&res->write_failures);
34619+ atomic_inc_unchecked(&res->write_failures);
34620
34621 if (!RES_IS_GSCSI(res->cfg_entry) &&
34622 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34623@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
34624 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34625 * hrrq_id assigned here in queuecommand
34626 */
34627- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34628+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34629 pinstance->num_hrrq;
34630 cmd->cmd_done = pmcraid_io_done;
34631
34632@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
34633 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
34634 * hrrq_id assigned here in queuecommand
34635 */
34636- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
34637+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
34638 pinstance->num_hrrq;
34639
34640 if (request_size) {
34641@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
34642
34643 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34644 /* add resources only after host is added into system */
34645- if (!atomic_read(&pinstance->expose_resources))
34646+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34647 return;
34648
34649 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
34650@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
34651 init_waitqueue_head(&pinstance->reset_wait_q);
34652
34653 atomic_set(&pinstance->outstanding_cmds, 0);
34654- atomic_set(&pinstance->last_message_id, 0);
34655- atomic_set(&pinstance->expose_resources, 0);
34656+ atomic_set_unchecked(&pinstance->last_message_id, 0);
34657+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34658
34659 INIT_LIST_HEAD(&pinstance->free_res_q);
34660 INIT_LIST_HEAD(&pinstance->used_res_q);
34661@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
34662 /* Schedule worker thread to handle CCN and take care of adding and
34663 * removing devices to OS
34664 */
34665- atomic_set(&pinstance->expose_resources, 1);
34666+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34667 schedule_work(&pinstance->worker_q);
34668 return rc;
34669
34670diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
34671index ca496c7..9c791d5 100644
34672--- a/drivers/scsi/pmcraid.h
34673+++ b/drivers/scsi/pmcraid.h
34674@@ -748,7 +748,7 @@ struct pmcraid_instance {
34675 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
34676
34677 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
34678- atomic_t last_message_id;
34679+ atomic_unchecked_t last_message_id;
34680
34681 /* configuration table */
34682 struct pmcraid_config_table *cfg_table;
34683@@ -777,7 +777,7 @@ struct pmcraid_instance {
34684 atomic_t outstanding_cmds;
34685
34686 /* should add/delete resources to mid-layer now ?*/
34687- atomic_t expose_resources;
34688+ atomic_unchecked_t expose_resources;
34689
34690
34691
34692@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
34693 struct pmcraid_config_table_entry_ext cfg_entry_ext;
34694 };
34695 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34696- atomic_t read_failures; /* count of failed READ commands */
34697- atomic_t write_failures; /* count of failed WRITE commands */
34698+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34699+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34700
34701 /* To indicate add/delete/modify during CCN */
34702 u8 change_detected;
34703diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
34704index fcf052c..a8025a4 100644
34705--- a/drivers/scsi/qla2xxx/qla_def.h
34706+++ b/drivers/scsi/qla2xxx/qla_def.h
34707@@ -2244,7 +2244,7 @@ struct isp_operations {
34708 int (*get_flash_version) (struct scsi_qla_host *, void *);
34709 int (*start_scsi) (srb_t *);
34710 int (*abort_isp) (struct scsi_qla_host *);
34711-};
34712+} __no_const;
34713
34714 /* MSI-X Support *************************************************************/
34715
34716diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
34717index fd5edc6..4906148 100644
34718--- a/drivers/scsi/qla4xxx/ql4_def.h
34719+++ b/drivers/scsi/qla4xxx/ql4_def.h
34720@@ -258,7 +258,7 @@ struct ddb_entry {
34721 * (4000 only) */
34722 atomic_t relogin_timer; /* Max Time to wait for
34723 * relogin to complete */
34724- atomic_t relogin_retry_count; /* Num of times relogin has been
34725+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34726 * retried */
34727 uint32_t default_time2wait; /* Default Min time between
34728 * relogins (+aens) */
34729diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
34730index 4169c8b..a8b896b 100644
34731--- a/drivers/scsi/qla4xxx/ql4_os.c
34732+++ b/drivers/scsi/qla4xxx/ql4_os.c
34733@@ -2104,12 +2104,12 @@ void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
34734 */
34735 if (!iscsi_is_session_online(cls_sess)) {
34736 /* Reset retry relogin timer */
34737- atomic_inc(&ddb_entry->relogin_retry_count);
34738+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34739 DEBUG2(ql4_printk(KERN_INFO, ha,
34740 "%s: index[%d] relogin timed out-retrying"
34741 " relogin (%d), retry (%d)\n", __func__,
34742 ddb_entry->fw_ddb_index,
34743- atomic_read(&ddb_entry->relogin_retry_count),
34744+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
34745 ddb_entry->default_time2wait + 4));
34746 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
34747 atomic_set(&ddb_entry->retry_relogin_timer,
34748@@ -3835,7 +3835,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
34749
34750 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34751 atomic_set(&ddb_entry->relogin_timer, 0);
34752- atomic_set(&ddb_entry->relogin_retry_count, 0);
34753+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34754
34755 ddb_entry->default_relogin_timeout =
34756 le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
34757diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
34758index 2aeb2e9..46e3925 100644
34759--- a/drivers/scsi/scsi.c
34760+++ b/drivers/scsi/scsi.c
34761@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
34762 unsigned long timeout;
34763 int rtn = 0;
34764
34765- atomic_inc(&cmd->device->iorequest_cnt);
34766+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34767
34768 /* check if the device is still usable */
34769 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34770diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
34771index f85cfa6..a57c9e8 100644
34772--- a/drivers/scsi/scsi_lib.c
34773+++ b/drivers/scsi/scsi_lib.c
34774@@ -1416,7 +1416,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
34775 shost = sdev->host;
34776 scsi_init_cmd_errh(cmd);
34777 cmd->result = DID_NO_CONNECT << 16;
34778- atomic_inc(&cmd->device->iorequest_cnt);
34779+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34780
34781 /*
34782 * SCSI request completion path will do scsi_device_unbusy(),
34783@@ -1442,9 +1442,9 @@ static void scsi_softirq_done(struct request *rq)
34784
34785 INIT_LIST_HEAD(&cmd->eh_entry);
34786
34787- atomic_inc(&cmd->device->iodone_cnt);
34788+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34789 if (cmd->result)
34790- atomic_inc(&cmd->device->ioerr_cnt);
34791+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34792
34793 disposition = scsi_decide_disposition(cmd);
34794 if (disposition != SUCCESS &&
34795diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
34796index 04c2a27..9d8bd66 100644
34797--- a/drivers/scsi/scsi_sysfs.c
34798+++ b/drivers/scsi/scsi_sysfs.c
34799@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
34800 char *buf) \
34801 { \
34802 struct scsi_device *sdev = to_scsi_device(dev); \
34803- unsigned long long count = atomic_read(&sdev->field); \
34804+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34805 return snprintf(buf, 20, "0x%llx\n", count); \
34806 } \
34807 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34808diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
34809index 84a1fdf..693b0d6 100644
34810--- a/drivers/scsi/scsi_tgt_lib.c
34811+++ b/drivers/scsi/scsi_tgt_lib.c
34812@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
34813 int err;
34814
34815 dprintk("%lx %u\n", uaddr, len);
34816- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
34817+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
34818 if (err) {
34819 /*
34820 * TODO: need to fixup sg_tablesize, max_segment_size,
34821diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
34822index 1b21491..1b7f60e 100644
34823--- a/drivers/scsi/scsi_transport_fc.c
34824+++ b/drivers/scsi/scsi_transport_fc.c
34825@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
34826 * Netlink Infrastructure
34827 */
34828
34829-static atomic_t fc_event_seq;
34830+static atomic_unchecked_t fc_event_seq;
34831
34832 /**
34833 * fc_get_event_number - Obtain the next sequential FC event number
34834@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
34835 u32
34836 fc_get_event_number(void)
34837 {
34838- return atomic_add_return(1, &fc_event_seq);
34839+ return atomic_add_return_unchecked(1, &fc_event_seq);
34840 }
34841 EXPORT_SYMBOL(fc_get_event_number);
34842
34843@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
34844 {
34845 int error;
34846
34847- atomic_set(&fc_event_seq, 0);
34848+ atomic_set_unchecked(&fc_event_seq, 0);
34849
34850 error = transport_class_register(&fc_host_class);
34851 if (error)
34852@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
34853 char *cp;
34854
34855 *val = simple_strtoul(buf, &cp, 0);
34856- if ((*cp && (*cp != '\n')) || (*val < 0))
34857+ if (*cp && (*cp != '\n'))
34858 return -EINVAL;
34859 /*
34860 * Check for overflow; dev_loss_tmo is u32
34861diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
34862index 96029e6..4d77fa0 100644
34863--- a/drivers/scsi/scsi_transport_iscsi.c
34864+++ b/drivers/scsi/scsi_transport_iscsi.c
34865@@ -79,7 +79,7 @@ struct iscsi_internal {
34866 struct transport_container session_cont;
34867 };
34868
34869-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34870+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34871 static struct workqueue_struct *iscsi_eh_timer_workq;
34872
34873 static DEFINE_IDA(iscsi_sess_ida);
34874@@ -1062,7 +1062,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
34875 int err;
34876
34877 ihost = shost->shost_data;
34878- session->sid = atomic_add_return(1, &iscsi_session_nr);
34879+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34880
34881 if (target_id == ISCSI_MAX_TARGET) {
34882 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
34883@@ -2663,7 +2663,7 @@ static __init int iscsi_transport_init(void)
34884 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34885 ISCSI_TRANSPORT_VERSION);
34886
34887- atomic_set(&iscsi_session_nr, 0);
34888+ atomic_set_unchecked(&iscsi_session_nr, 0);
34889
34890 err = class_register(&iscsi_transport_class);
34891 if (err)
34892diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
34893index 21a045e..ec89e03 100644
34894--- a/drivers/scsi/scsi_transport_srp.c
34895+++ b/drivers/scsi/scsi_transport_srp.c
34896@@ -33,7 +33,7 @@
34897 #include "scsi_transport_srp_internal.h"
34898
34899 struct srp_host_attrs {
34900- atomic_t next_port_id;
34901+ atomic_unchecked_t next_port_id;
34902 };
34903 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34904
34905@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
34906 struct Scsi_Host *shost = dev_to_shost(dev);
34907 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34908
34909- atomic_set(&srp_host->next_port_id, 0);
34910+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34911 return 0;
34912 }
34913
34914@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
34915 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34916 rport->roles = ids->roles;
34917
34918- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34919+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34920 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34921
34922 transport_setup_device(&rport->dev);
34923diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
34924index 441a1c5..07cece7 100644
34925--- a/drivers/scsi/sg.c
34926+++ b/drivers/scsi/sg.c
34927@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
34928 sdp->disk->disk_name,
34929 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
34930 NULL,
34931- (char *)arg);
34932+ (char __user *)arg);
34933 case BLKTRACESTART:
34934 return blk_trace_startstop(sdp->device->request_queue, 1);
34935 case BLKTRACESTOP:
34936@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
34937 const struct file_operations * fops;
34938 };
34939
34940-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34941+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34942 {"allow_dio", &adio_fops},
34943 {"debug", &debug_fops},
34944 {"def_reserved_size", &dressz_fops},
34945@@ -2327,7 +2327,7 @@ sg_proc_init(void)
34946 {
34947 int k, mask;
34948 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34949- struct sg_proc_leaf * leaf;
34950+ const struct sg_proc_leaf * leaf;
34951
34952 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34953 if (!sg_proc_sgp)
34954diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
34955index f64250e..1ee3049 100644
34956--- a/drivers/spi/spi-dw-pci.c
34957+++ b/drivers/spi/spi-dw-pci.c
34958@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
34959 #define spi_resume NULL
34960 #endif
34961
34962-static const struct pci_device_id pci_ids[] __devinitdata = {
34963+static const struct pci_device_id pci_ids[] __devinitconst = {
34964 /* Intel MID platform SPI controller 0 */
34965 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
34966 {},
34967diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
34968index 77eae99..b7cdcc9 100644
34969--- a/drivers/spi/spi.c
34970+++ b/drivers/spi/spi.c
34971@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
34972 EXPORT_SYMBOL_GPL(spi_bus_unlock);
34973
34974 /* portable code must never pass more than 32 bytes */
34975-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34976+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
34977
34978 static u8 *buf;
34979
34980diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
34981index 436fe97..4082570 100644
34982--- a/drivers/staging/gma500/power.c
34983+++ b/drivers/staging/gma500/power.c
34984@@ -266,7 +266,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
34985 ret = gma_resume_pci(dev->pdev);
34986 if (ret == 0) {
34987 /* FIXME: we want to defer this for Medfield/Oaktrail */
34988- gma_resume_display(dev);
34989+ gma_resume_display(dev->pdev);
34990 psb_irq_preinstall(dev);
34991 psb_irq_postinstall(dev);
34992 pm_runtime_get(&dev->pdev->dev);
34993diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
34994index bafccb3..e3ac78d 100644
34995--- a/drivers/staging/hv/rndis_filter.c
34996+++ b/drivers/staging/hv/rndis_filter.c
34997@@ -42,7 +42,7 @@ struct rndis_device {
34998
34999 enum rndis_device_state state;
35000 bool link_state;
35001- atomic_t new_req_id;
35002+ atomic_unchecked_t new_req_id;
35003
35004 spinlock_t request_lock;
35005 struct list_head req_list;
35006@@ -116,7 +116,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35007 * template
35008 */
35009 set = &rndis_msg->msg.set_req;
35010- set->req_id = atomic_inc_return(&dev->new_req_id);
35011+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35012
35013 /* Add to the request list */
35014 spin_lock_irqsave(&dev->request_lock, flags);
35015@@ -646,7 +646,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35016
35017 /* Setup the rndis set */
35018 halt = &request->request_msg.msg.halt_req;
35019- halt->req_id = atomic_inc_return(&dev->new_req_id);
35020+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35021
35022 /* Ignore return since this msg is optional. */
35023 rndis_filter_send_request(dev, request);
35024diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer_generic.h
35025index 9e8f010..af9efb5 100644
35026--- a/drivers/staging/iio/buffer_generic.h
35027+++ b/drivers/staging/iio/buffer_generic.h
35028@@ -64,7 +64,7 @@ struct iio_buffer_access_funcs {
35029
35030 int (*is_enabled)(struct iio_buffer *buffer);
35031 int (*enable)(struct iio_buffer *buffer);
35032-};
35033+} __no_const;
35034
35035 /**
35036 * struct iio_buffer_setup_ops - buffer setup related callbacks
35037diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
35038index 8b307b4..a97ac91 100644
35039--- a/drivers/staging/octeon/ethernet-rx.c
35040+++ b/drivers/staging/octeon/ethernet-rx.c
35041@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35042 /* Increment RX stats for virtual ports */
35043 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35044 #ifdef CONFIG_64BIT
35045- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35046- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35047+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35048+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35049 #else
35050- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35051- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35052+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35053+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35054 #endif
35055 }
35056 netif_receive_skb(skb);
35057@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
35058 dev->name);
35059 */
35060 #ifdef CONFIG_64BIT
35061- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35062+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35063 #else
35064- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35065+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
35066 #endif
35067 dev_kfree_skb_irq(skb);
35068 }
35069diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
35070index 076f866..2308070 100644
35071--- a/drivers/staging/octeon/ethernet.c
35072+++ b/drivers/staging/octeon/ethernet.c
35073@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
35074 * since the RX tasklet also increments it.
35075 */
35076 #ifdef CONFIG_64BIT
35077- atomic64_add(rx_status.dropped_packets,
35078- (atomic64_t *)&priv->stats.rx_dropped);
35079+ atomic64_add_unchecked(rx_status.dropped_packets,
35080+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35081 #else
35082- atomic_add(rx_status.dropped_packets,
35083- (atomic_t *)&priv->stats.rx_dropped);
35084+ atomic_add_unchecked(rx_status.dropped_packets,
35085+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
35086 #endif
35087 }
35088
35089diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
35090index 7a19555..466456d 100644
35091--- a/drivers/staging/pohmelfs/inode.c
35092+++ b/drivers/staging/pohmelfs/inode.c
35093@@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35094 mutex_init(&psb->mcache_lock);
35095 psb->mcache_root = RB_ROOT;
35096 psb->mcache_timeout = msecs_to_jiffies(5000);
35097- atomic_long_set(&psb->mcache_gen, 0);
35098+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35099
35100 psb->trans_max_pages = 100;
35101
35102@@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
35103 INIT_LIST_HEAD(&psb->crypto_ready_list);
35104 INIT_LIST_HEAD(&psb->crypto_active_list);
35105
35106- atomic_set(&psb->trans_gen, 1);
35107+ atomic_set_unchecked(&psb->trans_gen, 1);
35108 atomic_long_set(&psb->total_inodes, 0);
35109
35110 mutex_init(&psb->state_lock);
35111diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
35112index e22665c..a2a9390 100644
35113--- a/drivers/staging/pohmelfs/mcache.c
35114+++ b/drivers/staging/pohmelfs/mcache.c
35115@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
35116 m->data = data;
35117 m->start = start;
35118 m->size = size;
35119- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35120+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35121
35122 mutex_lock(&psb->mcache_lock);
35123 err = pohmelfs_mcache_insert(psb, m);
35124diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
35125index 985b6b7..7699e05 100644
35126--- a/drivers/staging/pohmelfs/netfs.h
35127+++ b/drivers/staging/pohmelfs/netfs.h
35128@@ -571,14 +571,14 @@ struct pohmelfs_config;
35129 struct pohmelfs_sb {
35130 struct rb_root mcache_root;
35131 struct mutex mcache_lock;
35132- atomic_long_t mcache_gen;
35133+ atomic_long_unchecked_t mcache_gen;
35134 unsigned long mcache_timeout;
35135
35136 unsigned int idx;
35137
35138 unsigned int trans_retries;
35139
35140- atomic_t trans_gen;
35141+ atomic_unchecked_t trans_gen;
35142
35143 unsigned int crypto_attached_size;
35144 unsigned int crypto_align_size;
35145diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
35146index 06c1a74..866eebc 100644
35147--- a/drivers/staging/pohmelfs/trans.c
35148+++ b/drivers/staging/pohmelfs/trans.c
35149@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
35150 int err;
35151 struct netfs_cmd *cmd = t->iovec.iov_base;
35152
35153- t->gen = atomic_inc_return(&psb->trans_gen);
35154+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35155
35156 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35157 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35158diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
35159index 86308a0..feaa925 100644
35160--- a/drivers/staging/rtl8712/rtl871x_io.h
35161+++ b/drivers/staging/rtl8712/rtl871x_io.h
35162@@ -108,7 +108,7 @@ struct _io_ops {
35163 u8 *pmem);
35164 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
35165 u8 *pmem);
35166-};
35167+} __no_const;
35168
35169 struct io_req {
35170 struct list_head list;
35171diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
35172index c7b5e8b..783d6cb 100644
35173--- a/drivers/staging/sbe-2t3e3/netdev.c
35174+++ b/drivers/staging/sbe-2t3e3/netdev.c
35175@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35176 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
35177
35178 if (rlen)
35179- if (copy_to_user(data, &resp, rlen))
35180+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
35181 return -EFAULT;
35182
35183 return 0;
35184diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
35185index be21617..0954e45 100644
35186--- a/drivers/staging/usbip/usbip_common.h
35187+++ b/drivers/staging/usbip/usbip_common.h
35188@@ -289,7 +289,7 @@ struct usbip_device {
35189 void (*shutdown)(struct usbip_device *);
35190 void (*reset)(struct usbip_device *);
35191 void (*unusable)(struct usbip_device *);
35192- } eh_ops;
35193+ } __no_const eh_ops;
35194 };
35195
35196 #if 0
35197diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
35198index 88b3298..3783eee 100644
35199--- a/drivers/staging/usbip/vhci.h
35200+++ b/drivers/staging/usbip/vhci.h
35201@@ -88,7 +88,7 @@ struct vhci_hcd {
35202 unsigned resuming:1;
35203 unsigned long re_timeout;
35204
35205- atomic_t seqnum;
35206+ atomic_unchecked_t seqnum;
35207
35208 /*
35209 * NOTE:
35210diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
35211index 2ee97e2..0420b86 100644
35212--- a/drivers/staging/usbip/vhci_hcd.c
35213+++ b/drivers/staging/usbip/vhci_hcd.c
35214@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
35215 return;
35216 }
35217
35218- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35219+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35220 if (priv->seqnum == 0xffff)
35221 dev_info(&urb->dev->dev, "seqnum max\n");
35222
35223@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
35224 return -ENOMEM;
35225 }
35226
35227- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35228+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35229 if (unlink->seqnum == 0xffff)
35230 pr_info("seqnum max\n");
35231
35232@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
35233 vdev->rhport = rhport;
35234 }
35235
35236- atomic_set(&vhci->seqnum, 0);
35237+ atomic_set_unchecked(&vhci->seqnum, 0);
35238 spin_lock_init(&vhci->lock);
35239
35240 hcd->power_budget = 0; /* no limit */
35241diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
35242index 3872b8c..fe6d2f4 100644
35243--- a/drivers/staging/usbip/vhci_rx.c
35244+++ b/drivers/staging/usbip/vhci_rx.c
35245@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
35246 if (!urb) {
35247 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
35248 pr_info("max seqnum %d\n",
35249- atomic_read(&the_controller->seqnum));
35250+ atomic_read_unchecked(&the_controller->seqnum));
35251 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35252 return;
35253 }
35254diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
35255index 7735027..30eed13 100644
35256--- a/drivers/staging/vt6655/hostap.c
35257+++ b/drivers/staging/vt6655/hostap.c
35258@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
35259 *
35260 */
35261
35262+static net_device_ops_no_const apdev_netdev_ops;
35263+
35264 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35265 {
35266 PSDevice apdev_priv;
35267 struct net_device *dev = pDevice->dev;
35268 int ret;
35269- const struct net_device_ops apdev_netdev_ops = {
35270- .ndo_start_xmit = pDevice->tx_80211,
35271- };
35272
35273 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35274
35275@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35276 *apdev_priv = *pDevice;
35277 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35278
35279+ /* only half broken now */
35280+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35281 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35282
35283 pDevice->apdev->type = ARPHRD_IEEE80211;
35284diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
35285index 51b5adf..098e320 100644
35286--- a/drivers/staging/vt6656/hostap.c
35287+++ b/drivers/staging/vt6656/hostap.c
35288@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
35289 *
35290 */
35291
35292+static net_device_ops_no_const apdev_netdev_ops;
35293+
35294 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35295 {
35296 PSDevice apdev_priv;
35297 struct net_device *dev = pDevice->dev;
35298 int ret;
35299- const struct net_device_ops apdev_netdev_ops = {
35300- .ndo_start_xmit = pDevice->tx_80211,
35301- };
35302
35303 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
35304
35305@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
35306 *apdev_priv = *pDevice;
35307 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
35308
35309+ /* only half broken now */
35310+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
35311 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
35312
35313 pDevice->apdev->type = ARPHRD_IEEE80211;
35314diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
35315index 7843dfd..3db105f 100644
35316--- a/drivers/staging/wlan-ng/hfa384x_usb.c
35317+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
35318@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
35319
35320 struct usbctlx_completor {
35321 int (*complete) (struct usbctlx_completor *);
35322-};
35323+} __no_const;
35324
35325 static int
35326 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
35327diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
35328index 1ca66ea..76f1343 100644
35329--- a/drivers/staging/zcache/tmem.c
35330+++ b/drivers/staging/zcache/tmem.c
35331@@ -39,7 +39,7 @@
35332 * A tmem host implementation must use this function to register callbacks
35333 * for memory allocation.
35334 */
35335-static struct tmem_hostops tmem_hostops;
35336+static tmem_hostops_no_const tmem_hostops;
35337
35338 static void tmem_objnode_tree_init(void);
35339
35340@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
35341 * A tmem host implementation must use this function to register
35342 * callbacks for a page-accessible memory (PAM) implementation
35343 */
35344-static struct tmem_pamops tmem_pamops;
35345+static tmem_pamops_no_const tmem_pamops;
35346
35347 void tmem_register_pamops(struct tmem_pamops *m)
35348 {
35349diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
35350index ed147c4..94fc3c6 100644
35351--- a/drivers/staging/zcache/tmem.h
35352+++ b/drivers/staging/zcache/tmem.h
35353@@ -180,6 +180,7 @@ struct tmem_pamops {
35354 void (*new_obj)(struct tmem_obj *);
35355 int (*replace_in_obj)(void *, struct tmem_obj *);
35356 };
35357+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
35358 extern void tmem_register_pamops(struct tmem_pamops *m);
35359
35360 /* memory allocation methods provided by the host implementation */
35361@@ -189,6 +190,7 @@ struct tmem_hostops {
35362 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
35363 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
35364 };
35365+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
35366 extern void tmem_register_hostops(struct tmem_hostops *m);
35367
35368 /* core tmem accessor functions */
35369diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
35370index 8599545..7761358 100644
35371--- a/drivers/target/iscsi/iscsi_target.c
35372+++ b/drivers/target/iscsi/iscsi_target.c
35373@@ -1364,7 +1364,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
35374 * outstanding_r2ts reaches zero, go ahead and send the delayed
35375 * TASK_ABORTED status.
35376 */
35377- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
35378+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
35379 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
35380 if (--cmd->outstanding_r2ts < 1) {
35381 iscsit_stop_dataout_timer(cmd);
35382diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
35383index 6845228..df77141 100644
35384--- a/drivers/target/target_core_tmr.c
35385+++ b/drivers/target/target_core_tmr.c
35386@@ -250,7 +250,7 @@ static void core_tmr_drain_task_list(
35387 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
35388 cmd->t_task_list_num,
35389 atomic_read(&cmd->t_task_cdbs_left),
35390- atomic_read(&cmd->t_task_cdbs_sent),
35391+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35392 atomic_read(&cmd->t_transport_active),
35393 atomic_read(&cmd->t_transport_stop),
35394 atomic_read(&cmd->t_transport_sent));
35395@@ -281,7 +281,7 @@ static void core_tmr_drain_task_list(
35396 pr_debug("LUN_RESET: got t_transport_active = 1 for"
35397 " task: %p, t_fe_count: %d dev: %p\n", task,
35398 fe_count, dev);
35399- atomic_set(&cmd->t_transport_aborted, 1);
35400+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35401 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35402
35403 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35404@@ -289,7 +289,7 @@ static void core_tmr_drain_task_list(
35405 }
35406 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
35407 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
35408- atomic_set(&cmd->t_transport_aborted, 1);
35409+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
35410 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35411
35412 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
35413diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
35414index e87d0eb..856cbcc 100644
35415--- a/drivers/target/target_core_transport.c
35416+++ b/drivers/target/target_core_transport.c
35417@@ -1343,7 +1343,7 @@ struct se_device *transport_add_device_to_core_hba(
35418
35419 dev->queue_depth = dev_limits->queue_depth;
35420 atomic_set(&dev->depth_left, dev->queue_depth);
35421- atomic_set(&dev->dev_ordered_id, 0);
35422+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
35423
35424 se_dev_set_default_attribs(dev, dev_limits);
35425
35426@@ -1530,7 +1530,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
35427 * Used to determine when ORDERED commands should go from
35428 * Dormant to Active status.
35429 */
35430- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
35431+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
35432 smp_mb__after_atomic_inc();
35433 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
35434 cmd->se_ordered_id, cmd->sam_task_attr,
35435@@ -1800,7 +1800,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
35436 " t_transport_active: %d t_transport_stop: %d"
35437 " t_transport_sent: %d\n", cmd->t_task_list_num,
35438 atomic_read(&cmd->t_task_cdbs_left),
35439- atomic_read(&cmd->t_task_cdbs_sent),
35440+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
35441 atomic_read(&cmd->t_task_cdbs_ex_left),
35442 atomic_read(&cmd->t_transport_active),
35443 atomic_read(&cmd->t_transport_stop),
35444@@ -2089,9 +2089,9 @@ check_depth:
35445
35446 spin_lock_irqsave(&cmd->t_state_lock, flags);
35447 task->task_flags |= (TF_ACTIVE | TF_SENT);
35448- atomic_inc(&cmd->t_task_cdbs_sent);
35449+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
35450
35451- if (atomic_read(&cmd->t_task_cdbs_sent) ==
35452+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
35453 cmd->t_task_list_num)
35454 atomic_set(&cmd->t_transport_sent, 1);
35455
35456@@ -4260,7 +4260,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
35457 atomic_set(&cmd->transport_lun_stop, 0);
35458 }
35459 if (!atomic_read(&cmd->t_transport_active) ||
35460- atomic_read(&cmd->t_transport_aborted)) {
35461+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
35462 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
35463 return false;
35464 }
35465@@ -4509,7 +4509,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
35466 {
35467 int ret = 0;
35468
35469- if (atomic_read(&cmd->t_transport_aborted) != 0) {
35470+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
35471 if (!send_status ||
35472 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
35473 return 1;
35474@@ -4546,7 +4546,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
35475 */
35476 if (cmd->data_direction == DMA_TO_DEVICE) {
35477 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
35478- atomic_inc(&cmd->t_transport_aborted);
35479+ atomic_inc_unchecked(&cmd->t_transport_aborted);
35480 smp_mb__after_atomic_inc();
35481 }
35482 }
35483diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
35484index b9040be..e3f5aab 100644
35485--- a/drivers/tty/hvc/hvcs.c
35486+++ b/drivers/tty/hvc/hvcs.c
35487@@ -83,6 +83,7 @@
35488 #include <asm/hvcserver.h>
35489 #include <asm/uaccess.h>
35490 #include <asm/vio.h>
35491+#include <asm/local.h>
35492
35493 /*
35494 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
35495@@ -270,7 +271,7 @@ struct hvcs_struct {
35496 unsigned int index;
35497
35498 struct tty_struct *tty;
35499- int open_count;
35500+ local_t open_count;
35501
35502 /*
35503 * Used to tell the driver kernel_thread what operations need to take
35504@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
35505
35506 spin_lock_irqsave(&hvcsd->lock, flags);
35507
35508- if (hvcsd->open_count > 0) {
35509+ if (local_read(&hvcsd->open_count) > 0) {
35510 spin_unlock_irqrestore(&hvcsd->lock, flags);
35511 printk(KERN_INFO "HVCS: vterm state unchanged. "
35512 "The hvcs device node is still in use.\n");
35513@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
35514 if ((retval = hvcs_partner_connect(hvcsd)))
35515 goto error_release;
35516
35517- hvcsd->open_count = 1;
35518+ local_set(&hvcsd->open_count, 1);
35519 hvcsd->tty = tty;
35520 tty->driver_data = hvcsd;
35521
35522@@ -1179,7 +1180,7 @@ fast_open:
35523
35524 spin_lock_irqsave(&hvcsd->lock, flags);
35525 kref_get(&hvcsd->kref);
35526- hvcsd->open_count++;
35527+ local_inc(&hvcsd->open_count);
35528 hvcsd->todo_mask |= HVCS_SCHED_READ;
35529 spin_unlock_irqrestore(&hvcsd->lock, flags);
35530
35531@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35532 hvcsd = tty->driver_data;
35533
35534 spin_lock_irqsave(&hvcsd->lock, flags);
35535- if (--hvcsd->open_count == 0) {
35536+ if (local_dec_and_test(&hvcsd->open_count)) {
35537
35538 vio_disable_interrupts(hvcsd->vdev);
35539
35540@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
35541 free_irq(irq, hvcsd);
35542 kref_put(&hvcsd->kref, destroy_hvcs_struct);
35543 return;
35544- } else if (hvcsd->open_count < 0) {
35545+ } else if (local_read(&hvcsd->open_count) < 0) {
35546 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
35547 " is missmanaged.\n",
35548- hvcsd->vdev->unit_address, hvcsd->open_count);
35549+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
35550 }
35551
35552 spin_unlock_irqrestore(&hvcsd->lock, flags);
35553@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35554
35555 spin_lock_irqsave(&hvcsd->lock, flags);
35556 /* Preserve this so that we know how many kref refs to put */
35557- temp_open_count = hvcsd->open_count;
35558+ temp_open_count = local_read(&hvcsd->open_count);
35559
35560 /*
35561 * Don't kref put inside the spinlock because the destruction
35562@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
35563 hvcsd->tty->driver_data = NULL;
35564 hvcsd->tty = NULL;
35565
35566- hvcsd->open_count = 0;
35567+ local_set(&hvcsd->open_count, 0);
35568
35569 /* This will drop any buffered data on the floor which is OK in a hangup
35570 * scenario. */
35571@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
35572 * the middle of a write operation? This is a crummy place to do this
35573 * but we want to keep it all in the spinlock.
35574 */
35575- if (hvcsd->open_count <= 0) {
35576+ if (local_read(&hvcsd->open_count) <= 0) {
35577 spin_unlock_irqrestore(&hvcsd->lock, flags);
35578 return -ENODEV;
35579 }
35580@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
35581 {
35582 struct hvcs_struct *hvcsd = tty->driver_data;
35583
35584- if (!hvcsd || hvcsd->open_count <= 0)
35585+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
35586 return 0;
35587
35588 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
35589diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
35590index ef92869..f4ebd88 100644
35591--- a/drivers/tty/ipwireless/tty.c
35592+++ b/drivers/tty/ipwireless/tty.c
35593@@ -29,6 +29,7 @@
35594 #include <linux/tty_driver.h>
35595 #include <linux/tty_flip.h>
35596 #include <linux/uaccess.h>
35597+#include <asm/local.h>
35598
35599 #include "tty.h"
35600 #include "network.h"
35601@@ -51,7 +52,7 @@ struct ipw_tty {
35602 int tty_type;
35603 struct ipw_network *network;
35604 struct tty_struct *linux_tty;
35605- int open_count;
35606+ local_t open_count;
35607 unsigned int control_lines;
35608 struct mutex ipw_tty_mutex;
35609 int tx_bytes_queued;
35610@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35611 mutex_unlock(&tty->ipw_tty_mutex);
35612 return -ENODEV;
35613 }
35614- if (tty->open_count == 0)
35615+ if (local_read(&tty->open_count) == 0)
35616 tty->tx_bytes_queued = 0;
35617
35618- tty->open_count++;
35619+ local_inc(&tty->open_count);
35620
35621 tty->linux_tty = linux_tty;
35622 linux_tty->driver_data = tty;
35623@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
35624
35625 static void do_ipw_close(struct ipw_tty *tty)
35626 {
35627- tty->open_count--;
35628-
35629- if (tty->open_count == 0) {
35630+ if (local_dec_return(&tty->open_count) == 0) {
35631 struct tty_struct *linux_tty = tty->linux_tty;
35632
35633 if (linux_tty != NULL) {
35634@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
35635 return;
35636
35637 mutex_lock(&tty->ipw_tty_mutex);
35638- if (tty->open_count == 0) {
35639+ if (local_read(&tty->open_count) == 0) {
35640 mutex_unlock(&tty->ipw_tty_mutex);
35641 return;
35642 }
35643@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
35644 return;
35645 }
35646
35647- if (!tty->open_count) {
35648+ if (!local_read(&tty->open_count)) {
35649 mutex_unlock(&tty->ipw_tty_mutex);
35650 return;
35651 }
35652@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
35653 return -ENODEV;
35654
35655 mutex_lock(&tty->ipw_tty_mutex);
35656- if (!tty->open_count) {
35657+ if (!local_read(&tty->open_count)) {
35658 mutex_unlock(&tty->ipw_tty_mutex);
35659 return -EINVAL;
35660 }
35661@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
35662 if (!tty)
35663 return -ENODEV;
35664
35665- if (!tty->open_count)
35666+ if (!local_read(&tty->open_count))
35667 return -EINVAL;
35668
35669 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
35670@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
35671 if (!tty)
35672 return 0;
35673
35674- if (!tty->open_count)
35675+ if (!local_read(&tty->open_count))
35676 return 0;
35677
35678 return tty->tx_bytes_queued;
35679@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
35680 if (!tty)
35681 return -ENODEV;
35682
35683- if (!tty->open_count)
35684+ if (!local_read(&tty->open_count))
35685 return -EINVAL;
35686
35687 return get_control_lines(tty);
35688@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
35689 if (!tty)
35690 return -ENODEV;
35691
35692- if (!tty->open_count)
35693+ if (!local_read(&tty->open_count))
35694 return -EINVAL;
35695
35696 return set_control_lines(tty, set, clear);
35697@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
35698 if (!tty)
35699 return -ENODEV;
35700
35701- if (!tty->open_count)
35702+ if (!local_read(&tty->open_count))
35703 return -EINVAL;
35704
35705 /* FIXME: Exactly how is the tty object locked here .. */
35706@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
35707 against a parallel ioctl etc */
35708 mutex_lock(&ttyj->ipw_tty_mutex);
35709 }
35710- while (ttyj->open_count)
35711+ while (local_read(&ttyj->open_count))
35712 do_ipw_close(ttyj);
35713 ipwireless_disassociate_network_ttys(network,
35714 ttyj->channel_idx);
35715diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
35716index fc7bbba..9527e93 100644
35717--- a/drivers/tty/n_gsm.c
35718+++ b/drivers/tty/n_gsm.c
35719@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
35720 kref_init(&dlci->ref);
35721 mutex_init(&dlci->mutex);
35722 dlci->fifo = &dlci->_fifo;
35723- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
35724+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
35725 kfree(dlci);
35726 return NULL;
35727 }
35728diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
35729index 39d6ab6..eb97f41 100644
35730--- a/drivers/tty/n_tty.c
35731+++ b/drivers/tty/n_tty.c
35732@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
35733 {
35734 *ops = tty_ldisc_N_TTY;
35735 ops->owner = NULL;
35736- ops->refcount = ops->flags = 0;
35737+ atomic_set(&ops->refcount, 0);
35738+ ops->flags = 0;
35739 }
35740 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
35741diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
35742index e18604b..a7d5a11 100644
35743--- a/drivers/tty/pty.c
35744+++ b/drivers/tty/pty.c
35745@@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
35746 register_sysctl_table(pty_root_table);
35747
35748 /* Now create the /dev/ptmx special device */
35749+ pax_open_kernel();
35750 tty_default_fops(&ptmx_fops);
35751- ptmx_fops.open = ptmx_open;
35752+ *(void **)&ptmx_fops.open = ptmx_open;
35753+ pax_close_kernel();
35754
35755 cdev_init(&ptmx_cdev, &ptmx_fops);
35756 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
35757diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
35758index 2b42a01..32a2ed3 100644
35759--- a/drivers/tty/serial/kgdboc.c
35760+++ b/drivers/tty/serial/kgdboc.c
35761@@ -24,8 +24,9 @@
35762 #define MAX_CONFIG_LEN 40
35763
35764 static struct kgdb_io kgdboc_io_ops;
35765+static struct kgdb_io kgdboc_io_ops_console;
35766
35767-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
35768+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
35769 static int configured = -1;
35770
35771 static char config[MAX_CONFIG_LEN];
35772@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
35773 kgdboc_unregister_kbd();
35774 if (configured == 1)
35775 kgdb_unregister_io_module(&kgdboc_io_ops);
35776+ else if (configured == 2)
35777+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
35778 }
35779
35780 static int configure_kgdboc(void)
35781@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
35782 int err;
35783 char *cptr = config;
35784 struct console *cons;
35785+ int is_console = 0;
35786
35787 err = kgdboc_option_setup(config);
35788 if (err || !strlen(config) || isspace(config[0]))
35789 goto noconfig;
35790
35791 err = -ENODEV;
35792- kgdboc_io_ops.is_console = 0;
35793 kgdb_tty_driver = NULL;
35794
35795 kgdboc_use_kms = 0;
35796@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
35797 int idx;
35798 if (cons->device && cons->device(cons, &idx) == p &&
35799 idx == tty_line) {
35800- kgdboc_io_ops.is_console = 1;
35801+ is_console = 1;
35802 break;
35803 }
35804 cons = cons->next;
35805@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
35806 kgdb_tty_line = tty_line;
35807
35808 do_register:
35809- err = kgdb_register_io_module(&kgdboc_io_ops);
35810+ if (is_console) {
35811+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
35812+ configured = 2;
35813+ } else {
35814+ err = kgdb_register_io_module(&kgdboc_io_ops);
35815+ configured = 1;
35816+ }
35817 if (err)
35818 goto noconfig;
35819
35820- configured = 1;
35821-
35822 return 0;
35823
35824 noconfig:
35825@@ -213,7 +220,7 @@ noconfig:
35826 static int __init init_kgdboc(void)
35827 {
35828 /* Already configured? */
35829- if (configured == 1)
35830+ if (configured >= 1)
35831 return 0;
35832
35833 return configure_kgdboc();
35834@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
35835 if (config[len - 1] == '\n')
35836 config[len - 1] = '\0';
35837
35838- if (configured == 1)
35839+ if (configured >= 1)
35840 cleanup_kgdboc();
35841
35842 /* Go and configure with the new params. */
35843@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
35844 .post_exception = kgdboc_post_exp_handler,
35845 };
35846
35847+static struct kgdb_io kgdboc_io_ops_console = {
35848+ .name = "kgdboc",
35849+ .read_char = kgdboc_get_char,
35850+ .write_char = kgdboc_put_char,
35851+ .pre_exception = kgdboc_pre_exp_handler,
35852+ .post_exception = kgdboc_post_exp_handler,
35853+ .is_console = 1
35854+};
35855+
35856 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
35857 /* This is only available if kgdboc is a built in for early debugging */
35858 static int __init kgdboc_early_init(char *opt)
35859diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
35860index 05085be..67eadb0 100644
35861--- a/drivers/tty/tty_io.c
35862+++ b/drivers/tty/tty_io.c
35863@@ -3240,7 +3240,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
35864
35865 void tty_default_fops(struct file_operations *fops)
35866 {
35867- *fops = tty_fops;
35868+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
35869 }
35870
35871 /*
35872diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
35873index 8e0924f..4204eb4 100644
35874--- a/drivers/tty/tty_ldisc.c
35875+++ b/drivers/tty/tty_ldisc.c
35876@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
35877 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
35878 struct tty_ldisc_ops *ldo = ld->ops;
35879
35880- ldo->refcount--;
35881+ atomic_dec(&ldo->refcount);
35882 module_put(ldo->owner);
35883 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35884
35885@@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
35886 spin_lock_irqsave(&tty_ldisc_lock, flags);
35887 tty_ldiscs[disc] = new_ldisc;
35888 new_ldisc->num = disc;
35889- new_ldisc->refcount = 0;
35890+ atomic_set(&new_ldisc->refcount, 0);
35891 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35892
35893 return ret;
35894@@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
35895 return -EINVAL;
35896
35897 spin_lock_irqsave(&tty_ldisc_lock, flags);
35898- if (tty_ldiscs[disc]->refcount)
35899+ if (atomic_read(&tty_ldiscs[disc]->refcount))
35900 ret = -EBUSY;
35901 else
35902 tty_ldiscs[disc] = NULL;
35903@@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
35904 if (ldops) {
35905 ret = ERR_PTR(-EAGAIN);
35906 if (try_module_get(ldops->owner)) {
35907- ldops->refcount++;
35908+ atomic_inc(&ldops->refcount);
35909 ret = ldops;
35910 }
35911 }
35912@@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
35913 unsigned long flags;
35914
35915 spin_lock_irqsave(&tty_ldisc_lock, flags);
35916- ldops->refcount--;
35917+ atomic_dec(&ldops->refcount);
35918 module_put(ldops->owner);
35919 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
35920 }
35921diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
35922index a605549..6bd3c96 100644
35923--- a/drivers/tty/vt/keyboard.c
35924+++ b/drivers/tty/vt/keyboard.c
35925@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
35926 kbd->kbdmode == VC_OFF) &&
35927 value != KVAL(K_SAK))
35928 return; /* SAK is allowed even in raw mode */
35929+
35930+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
35931+ {
35932+ void *func = fn_handler[value];
35933+ if (func == fn_show_state || func == fn_show_ptregs ||
35934+ func == fn_show_mem)
35935+ return;
35936+ }
35937+#endif
35938+
35939 fn_handler[value](vc);
35940 }
35941
35942diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
35943index 5e096f4..0da1363 100644
35944--- a/drivers/tty/vt/vt_ioctl.c
35945+++ b/drivers/tty/vt/vt_ioctl.c
35946@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35947 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
35948 return -EFAULT;
35949
35950- if (!capable(CAP_SYS_TTY_CONFIG))
35951- perm = 0;
35952-
35953 switch (cmd) {
35954 case KDGKBENT:
35955 key_map = key_maps[s];
35956@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
35957 val = (i ? K_HOLE : K_NOSUCHMAP);
35958 return put_user(val, &user_kbe->kb_value);
35959 case KDSKBENT:
35960+ if (!capable(CAP_SYS_TTY_CONFIG))
35961+ perm = 0;
35962+
35963 if (!perm)
35964 return -EPERM;
35965 if (!i && v == K_NOSUCHMAP) {
35966@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35967 int i, j, k;
35968 int ret;
35969
35970- if (!capable(CAP_SYS_TTY_CONFIG))
35971- perm = 0;
35972-
35973 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
35974 if (!kbs) {
35975 ret = -ENOMEM;
35976@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
35977 kfree(kbs);
35978 return ((p && *p) ? -EOVERFLOW : 0);
35979 case KDSKBSENT:
35980+ if (!capable(CAP_SYS_TTY_CONFIG))
35981+ perm = 0;
35982+
35983 if (!perm) {
35984 ret = -EPERM;
35985 goto reterr;
35986diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
35987index a783d53..cb30d94 100644
35988--- a/drivers/uio/uio.c
35989+++ b/drivers/uio/uio.c
35990@@ -25,6 +25,7 @@
35991 #include <linux/kobject.h>
35992 #include <linux/cdev.h>
35993 #include <linux/uio_driver.h>
35994+#include <asm/local.h>
35995
35996 #define UIO_MAX_DEVICES (1U << MINORBITS)
35997
35998@@ -32,10 +33,10 @@ struct uio_device {
35999 struct module *owner;
36000 struct device *dev;
36001 int minor;
36002- atomic_t event;
36003+ atomic_unchecked_t event;
36004 struct fasync_struct *async_queue;
36005 wait_queue_head_t wait;
36006- int vma_count;
36007+ local_t vma_count;
36008 struct uio_info *info;
36009 struct kobject *map_dir;
36010 struct kobject *portio_dir;
36011@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
36012 struct device_attribute *attr, char *buf)
36013 {
36014 struct uio_device *idev = dev_get_drvdata(dev);
36015- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
36016+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
36017 }
36018
36019 static struct device_attribute uio_class_attributes[] = {
36020@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
36021 {
36022 struct uio_device *idev = info->uio_dev;
36023
36024- atomic_inc(&idev->event);
36025+ atomic_inc_unchecked(&idev->event);
36026 wake_up_interruptible(&idev->wait);
36027 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
36028 }
36029@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
36030 }
36031
36032 listener->dev = idev;
36033- listener->event_count = atomic_read(&idev->event);
36034+ listener->event_count = atomic_read_unchecked(&idev->event);
36035 filep->private_data = listener;
36036
36037 if (idev->info->open) {
36038@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
36039 return -EIO;
36040
36041 poll_wait(filep, &idev->wait, wait);
36042- if (listener->event_count != atomic_read(&idev->event))
36043+ if (listener->event_count != atomic_read_unchecked(&idev->event))
36044 return POLLIN | POLLRDNORM;
36045 return 0;
36046 }
36047@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
36048 do {
36049 set_current_state(TASK_INTERRUPTIBLE);
36050
36051- event_count = atomic_read(&idev->event);
36052+ event_count = atomic_read_unchecked(&idev->event);
36053 if (event_count != listener->event_count) {
36054 if (copy_to_user(buf, &event_count, count))
36055 retval = -EFAULT;
36056@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
36057 static void uio_vma_open(struct vm_area_struct *vma)
36058 {
36059 struct uio_device *idev = vma->vm_private_data;
36060- idev->vma_count++;
36061+ local_inc(&idev->vma_count);
36062 }
36063
36064 static void uio_vma_close(struct vm_area_struct *vma)
36065 {
36066 struct uio_device *idev = vma->vm_private_data;
36067- idev->vma_count--;
36068+ local_dec(&idev->vma_count);
36069 }
36070
36071 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36072@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
36073 idev->owner = owner;
36074 idev->info = info;
36075 init_waitqueue_head(&idev->wait);
36076- atomic_set(&idev->event, 0);
36077+ atomic_set_unchecked(&idev->event, 0);
36078
36079 ret = uio_get_minor(idev);
36080 if (ret)
36081diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
36082index a845f8b..4f54072 100644
36083--- a/drivers/usb/atm/cxacru.c
36084+++ b/drivers/usb/atm/cxacru.c
36085@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
36086 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
36087 if (ret < 2)
36088 return -EINVAL;
36089- if (index < 0 || index > 0x7f)
36090+ if (index > 0x7f)
36091 return -EINVAL;
36092 pos += tmp;
36093
36094diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
36095index d3448ca..d2864ca 100644
36096--- a/drivers/usb/atm/usbatm.c
36097+++ b/drivers/usb/atm/usbatm.c
36098@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36099 if (printk_ratelimit())
36100 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
36101 __func__, vpi, vci);
36102- atomic_inc(&vcc->stats->rx_err);
36103+ atomic_inc_unchecked(&vcc->stats->rx_err);
36104 return;
36105 }
36106
36107@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36108 if (length > ATM_MAX_AAL5_PDU) {
36109 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
36110 __func__, length, vcc);
36111- atomic_inc(&vcc->stats->rx_err);
36112+ atomic_inc_unchecked(&vcc->stats->rx_err);
36113 goto out;
36114 }
36115
36116@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36117 if (sarb->len < pdu_length) {
36118 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
36119 __func__, pdu_length, sarb->len, vcc);
36120- atomic_inc(&vcc->stats->rx_err);
36121+ atomic_inc_unchecked(&vcc->stats->rx_err);
36122 goto out;
36123 }
36124
36125 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
36126 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
36127 __func__, vcc);
36128- atomic_inc(&vcc->stats->rx_err);
36129+ atomic_inc_unchecked(&vcc->stats->rx_err);
36130 goto out;
36131 }
36132
36133@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36134 if (printk_ratelimit())
36135 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
36136 __func__, length);
36137- atomic_inc(&vcc->stats->rx_drop);
36138+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36139 goto out;
36140 }
36141
36142@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
36143
36144 vcc->push(vcc, skb);
36145
36146- atomic_inc(&vcc->stats->rx);
36147+ atomic_inc_unchecked(&vcc->stats->rx);
36148 out:
36149 skb_trim(sarb, 0);
36150 }
36151@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
36152 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
36153
36154 usbatm_pop(vcc, skb);
36155- atomic_inc(&vcc->stats->tx);
36156+ atomic_inc_unchecked(&vcc->stats->tx);
36157
36158 skb = skb_dequeue(&instance->sndqueue);
36159 }
36160@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
36161 if (!left--)
36162 return sprintf(page,
36163 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
36164- atomic_read(&atm_dev->stats.aal5.tx),
36165- atomic_read(&atm_dev->stats.aal5.tx_err),
36166- atomic_read(&atm_dev->stats.aal5.rx),
36167- atomic_read(&atm_dev->stats.aal5.rx_err),
36168- atomic_read(&atm_dev->stats.aal5.rx_drop));
36169+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
36170+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
36171+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
36172+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
36173+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
36174
36175 if (!left--) {
36176 if (instance->disconnected)
36177diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
36178index d956965..4179a77 100644
36179--- a/drivers/usb/core/devices.c
36180+++ b/drivers/usb/core/devices.c
36181@@ -126,7 +126,7 @@ static const char format_endpt[] =
36182 * time it gets called.
36183 */
36184 static struct device_connect_event {
36185- atomic_t count;
36186+ atomic_unchecked_t count;
36187 wait_queue_head_t wait;
36188 } device_event = {
36189 .count = ATOMIC_INIT(1),
36190@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
36191
36192 void usbfs_conn_disc_event(void)
36193 {
36194- atomic_add(2, &device_event.count);
36195+ atomic_add_unchecked(2, &device_event.count);
36196 wake_up(&device_event.wait);
36197 }
36198
36199@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
36200
36201 poll_wait(file, &device_event.wait, wait);
36202
36203- event_count = atomic_read(&device_event.count);
36204+ event_count = atomic_read_unchecked(&device_event.count);
36205 if (file->f_version != event_count) {
36206 file->f_version = event_count;
36207 return POLLIN | POLLRDNORM;
36208diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
36209index b3bdfed..a9460e0 100644
36210--- a/drivers/usb/core/message.c
36211+++ b/drivers/usb/core/message.c
36212@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
36213 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
36214 if (buf) {
36215 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
36216- if (len > 0) {
36217- smallbuf = kmalloc(++len, GFP_NOIO);
36218+ if (len++ > 0) {
36219+ smallbuf = kmalloc(len, GFP_NOIO);
36220 if (!smallbuf)
36221 return buf;
36222 memcpy(smallbuf, buf, len);
36223diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
36224index 1fc8f12..20647c1 100644
36225--- a/drivers/usb/early/ehci-dbgp.c
36226+++ b/drivers/usb/early/ehci-dbgp.c
36227@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
36228
36229 #ifdef CONFIG_KGDB
36230 static struct kgdb_io kgdbdbgp_io_ops;
36231-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
36232+static struct kgdb_io kgdbdbgp_io_ops_console;
36233+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
36234 #else
36235 #define dbgp_kgdb_mode (0)
36236 #endif
36237@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
36238 .write_char = kgdbdbgp_write_char,
36239 };
36240
36241+static struct kgdb_io kgdbdbgp_io_ops_console = {
36242+ .name = "kgdbdbgp",
36243+ .read_char = kgdbdbgp_read_char,
36244+ .write_char = kgdbdbgp_write_char,
36245+ .is_console = 1
36246+};
36247+
36248 static int kgdbdbgp_wait_time;
36249
36250 static int __init kgdbdbgp_parse_config(char *str)
36251@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
36252 ptr++;
36253 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
36254 }
36255- kgdb_register_io_module(&kgdbdbgp_io_ops);
36256- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
36257+ if (early_dbgp_console.index != -1)
36258+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
36259+ else
36260+ kgdb_register_io_module(&kgdbdbgp_io_ops);
36261
36262 return 0;
36263 }
36264diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
36265index d6bea3e..60b250e 100644
36266--- a/drivers/usb/wusbcore/wa-hc.h
36267+++ b/drivers/usb/wusbcore/wa-hc.h
36268@@ -192,7 +192,7 @@ struct wahc {
36269 struct list_head xfer_delayed_list;
36270 spinlock_t xfer_list_lock;
36271 struct work_struct xfer_work;
36272- atomic_t xfer_id_count;
36273+ atomic_unchecked_t xfer_id_count;
36274 };
36275
36276
36277@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
36278 INIT_LIST_HEAD(&wa->xfer_delayed_list);
36279 spin_lock_init(&wa->xfer_list_lock);
36280 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
36281- atomic_set(&wa->xfer_id_count, 1);
36282+ atomic_set_unchecked(&wa->xfer_id_count, 1);
36283 }
36284
36285 /**
36286diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
36287index 57c01ab..8a05959 100644
36288--- a/drivers/usb/wusbcore/wa-xfer.c
36289+++ b/drivers/usb/wusbcore/wa-xfer.c
36290@@ -296,7 +296,7 @@ out:
36291 */
36292 static void wa_xfer_id_init(struct wa_xfer *xfer)
36293 {
36294- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
36295+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
36296 }
36297
36298 /*
36299diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
36300index c14c42b..f955cc2 100644
36301--- a/drivers/vhost/vhost.c
36302+++ b/drivers/vhost/vhost.c
36303@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
36304 return 0;
36305 }
36306
36307-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
36308+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
36309 {
36310 struct file *eventfp, *filep = NULL,
36311 *pollstart = NULL, *pollstop = NULL;
36312diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
36313index b0b2ac3..89a4399 100644
36314--- a/drivers/video/aty/aty128fb.c
36315+++ b/drivers/video/aty/aty128fb.c
36316@@ -148,7 +148,7 @@ enum {
36317 };
36318
36319 /* Must match above enum */
36320-static const char *r128_family[] __devinitdata = {
36321+static const char *r128_family[] __devinitconst = {
36322 "AGP",
36323 "PCI",
36324 "PRO AGP",
36325diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
36326index 5c3960d..15cf8fc 100644
36327--- a/drivers/video/fbcmap.c
36328+++ b/drivers/video/fbcmap.c
36329@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
36330 rc = -ENODEV;
36331 goto out;
36332 }
36333- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36334- !info->fbops->fb_setcmap)) {
36335+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36336 rc = -EINVAL;
36337 goto out1;
36338 }
36339diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
36340index ad93629..e020fc3 100644
36341--- a/drivers/video/fbmem.c
36342+++ b/drivers/video/fbmem.c
36343@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36344 image->dx += image->width + 8;
36345 }
36346 } else if (rotate == FB_ROTATE_UD) {
36347- for (x = 0; x < num && image->dx >= 0; x++) {
36348+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36349 info->fbops->fb_imageblit(info, image);
36350 image->dx -= image->width + 8;
36351 }
36352@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
36353 image->dy += image->height + 8;
36354 }
36355 } else if (rotate == FB_ROTATE_CCW) {
36356- for (x = 0; x < num && image->dy >= 0; x++) {
36357+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36358 info->fbops->fb_imageblit(info, image);
36359 image->dy -= image->height + 8;
36360 }
36361@@ -1143,7 +1143,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
36362 return -EFAULT;
36363 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36364 return -EINVAL;
36365- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36366+ if (con2fb.framebuffer >= FB_MAX)
36367 return -EINVAL;
36368 if (!registered_fb[con2fb.framebuffer])
36369 request_module("fb%d", con2fb.framebuffer);
36370diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
36371index 5a5d092..265c5ed 100644
36372--- a/drivers/video/geode/gx1fb_core.c
36373+++ b/drivers/video/geode/gx1fb_core.c
36374@@ -29,7 +29,7 @@ static int crt_option = 1;
36375 static char panel_option[32] = "";
36376
36377 /* Modes relevant to the GX1 (taken from modedb.c) */
36378-static const struct fb_videomode __devinitdata gx1_modedb[] = {
36379+static const struct fb_videomode __devinitconst gx1_modedb[] = {
36380 /* 640x480-60 VESA */
36381 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
36382 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
36383diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
36384index 0fad23f..0e9afa4 100644
36385--- a/drivers/video/gxt4500.c
36386+++ b/drivers/video/gxt4500.c
36387@@ -156,7 +156,7 @@ struct gxt4500_par {
36388 static char *mode_option;
36389
36390 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
36391-static const struct fb_videomode defaultmode __devinitdata = {
36392+static const struct fb_videomode defaultmode __devinitconst = {
36393 .refresh = 60,
36394 .xres = 1280,
36395 .yres = 1024,
36396@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
36397 return 0;
36398 }
36399
36400-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
36401+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
36402 .id = "IBM GXT4500P",
36403 .type = FB_TYPE_PACKED_PIXELS,
36404 .visual = FB_VISUAL_PSEUDOCOLOR,
36405diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
36406index 7672d2e..b56437f 100644
36407--- a/drivers/video/i810/i810_accel.c
36408+++ b/drivers/video/i810/i810_accel.c
36409@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
36410 }
36411 }
36412 printk("ringbuffer lockup!!!\n");
36413+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36414 i810_report_error(mmio);
36415 par->dev_flags |= LOCKUP;
36416 info->pixmap.scan_align = 1;
36417diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
36418index 318f6fb..9a389c1 100644
36419--- a/drivers/video/i810/i810_main.c
36420+++ b/drivers/video/i810/i810_main.c
36421@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
36422 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
36423
36424 /* PCI */
36425-static const char *i810_pci_list[] __devinitdata = {
36426+static const char *i810_pci_list[] __devinitconst = {
36427 "Intel(R) 810 Framebuffer Device" ,
36428 "Intel(R) 810-DC100 Framebuffer Device" ,
36429 "Intel(R) 810E Framebuffer Device" ,
36430diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
36431index de36693..3c63fc2 100644
36432--- a/drivers/video/jz4740_fb.c
36433+++ b/drivers/video/jz4740_fb.c
36434@@ -136,7 +136,7 @@ struct jzfb {
36435 uint32_t pseudo_palette[16];
36436 };
36437
36438-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
36439+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
36440 .id = "JZ4740 FB",
36441 .type = FB_TYPE_PACKED_PIXELS,
36442 .visual = FB_VISUAL_TRUECOLOR,
36443diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
36444index 3c14e43..eafa544 100644
36445--- a/drivers/video/logo/logo_linux_clut224.ppm
36446+++ b/drivers/video/logo/logo_linux_clut224.ppm
36447@@ -1,1604 +1,1123 @@
36448 P3
36449-# Standard 224-color Linux logo
36450 80 80
36451 255
36452- 0 0 0 0 0 0 0 0 0 0 0 0
36453- 0 0 0 0 0 0 0 0 0 0 0 0
36454- 0 0 0 0 0 0 0 0 0 0 0 0
36455- 0 0 0 0 0 0 0 0 0 0 0 0
36456- 0 0 0 0 0 0 0 0 0 0 0 0
36457- 0 0 0 0 0 0 0 0 0 0 0 0
36458- 0 0 0 0 0 0 0 0 0 0 0 0
36459- 0 0 0 0 0 0 0 0 0 0 0 0
36460- 0 0 0 0 0 0 0 0 0 0 0 0
36461- 6 6 6 6 6 6 10 10 10 10 10 10
36462- 10 10 10 6 6 6 6 6 6 6 6 6
36463- 0 0 0 0 0 0 0 0 0 0 0 0
36464- 0 0 0 0 0 0 0 0 0 0 0 0
36465- 0 0 0 0 0 0 0 0 0 0 0 0
36466- 0 0 0 0 0 0 0 0 0 0 0 0
36467- 0 0 0 0 0 0 0 0 0 0 0 0
36468- 0 0 0 0 0 0 0 0 0 0 0 0
36469- 0 0 0 0 0 0 0 0 0 0 0 0
36470- 0 0 0 0 0 0 0 0 0 0 0 0
36471- 0 0 0 0 0 0 0 0 0 0 0 0
36472- 0 0 0 0 0 0 0 0 0 0 0 0
36473- 0 0 0 0 0 0 0 0 0 0 0 0
36474- 0 0 0 0 0 0 0 0 0 0 0 0
36475- 0 0 0 0 0 0 0 0 0 0 0 0
36476- 0 0 0 0 0 0 0 0 0 0 0 0
36477- 0 0 0 0 0 0 0 0 0 0 0 0
36478- 0 0 0 0 0 0 0 0 0 0 0 0
36479- 0 0 0 0 0 0 0 0 0 0 0 0
36480- 0 0 0 6 6 6 10 10 10 14 14 14
36481- 22 22 22 26 26 26 30 30 30 34 34 34
36482- 30 30 30 30 30 30 26 26 26 18 18 18
36483- 14 14 14 10 10 10 6 6 6 0 0 0
36484- 0 0 0 0 0 0 0 0 0 0 0 0
36485- 0 0 0 0 0 0 0 0 0 0 0 0
36486- 0 0 0 0 0 0 0 0 0 0 0 0
36487- 0 0 0 0 0 0 0 0 0 0 0 0
36488- 0 0 0 0 0 0 0 0 0 0 0 0
36489- 0 0 0 0 0 0 0 0 0 0 0 0
36490- 0 0 0 0 0 0 0 0 0 0 0 0
36491- 0 0 0 0 0 0 0 0 0 0 0 0
36492- 0 0 0 0 0 0 0 0 0 0 0 0
36493- 0 0 0 0 0 1 0 0 1 0 0 0
36494- 0 0 0 0 0 0 0 0 0 0 0 0
36495- 0 0 0 0 0 0 0 0 0 0 0 0
36496- 0 0 0 0 0 0 0 0 0 0 0 0
36497- 0 0 0 0 0 0 0 0 0 0 0 0
36498- 0 0 0 0 0 0 0 0 0 0 0 0
36499- 0 0 0 0 0 0 0 0 0 0 0 0
36500- 6 6 6 14 14 14 26 26 26 42 42 42
36501- 54 54 54 66 66 66 78 78 78 78 78 78
36502- 78 78 78 74 74 74 66 66 66 54 54 54
36503- 42 42 42 26 26 26 18 18 18 10 10 10
36504- 6 6 6 0 0 0 0 0 0 0 0 0
36505- 0 0 0 0 0 0 0 0 0 0 0 0
36506- 0 0 0 0 0 0 0 0 0 0 0 0
36507- 0 0 0 0 0 0 0 0 0 0 0 0
36508- 0 0 0 0 0 0 0 0 0 0 0 0
36509- 0 0 0 0 0 0 0 0 0 0 0 0
36510- 0 0 0 0 0 0 0 0 0 0 0 0
36511- 0 0 0 0 0 0 0 0 0 0 0 0
36512- 0 0 0 0 0 0 0 0 0 0 0 0
36513- 0 0 1 0 0 0 0 0 0 0 0 0
36514- 0 0 0 0 0 0 0 0 0 0 0 0
36515- 0 0 0 0 0 0 0 0 0 0 0 0
36516- 0 0 0 0 0 0 0 0 0 0 0 0
36517- 0 0 0 0 0 0 0 0 0 0 0 0
36518- 0 0 0 0 0 0 0 0 0 0 0 0
36519- 0 0 0 0 0 0 0 0 0 10 10 10
36520- 22 22 22 42 42 42 66 66 66 86 86 86
36521- 66 66 66 38 38 38 38 38 38 22 22 22
36522- 26 26 26 34 34 34 54 54 54 66 66 66
36523- 86 86 86 70 70 70 46 46 46 26 26 26
36524- 14 14 14 6 6 6 0 0 0 0 0 0
36525- 0 0 0 0 0 0 0 0 0 0 0 0
36526- 0 0 0 0 0 0 0 0 0 0 0 0
36527- 0 0 0 0 0 0 0 0 0 0 0 0
36528- 0 0 0 0 0 0 0 0 0 0 0 0
36529- 0 0 0 0 0 0 0 0 0 0 0 0
36530- 0 0 0 0 0 0 0 0 0 0 0 0
36531- 0 0 0 0 0 0 0 0 0 0 0 0
36532- 0 0 0 0 0 0 0 0 0 0 0 0
36533- 0 0 1 0 0 1 0 0 1 0 0 0
36534- 0 0 0 0 0 0 0 0 0 0 0 0
36535- 0 0 0 0 0 0 0 0 0 0 0 0
36536- 0 0 0 0 0 0 0 0 0 0 0 0
36537- 0 0 0 0 0 0 0 0 0 0 0 0
36538- 0 0 0 0 0 0 0 0 0 0 0 0
36539- 0 0 0 0 0 0 10 10 10 26 26 26
36540- 50 50 50 82 82 82 58 58 58 6 6 6
36541- 2 2 6 2 2 6 2 2 6 2 2 6
36542- 2 2 6 2 2 6 2 2 6 2 2 6
36543- 6 6 6 54 54 54 86 86 86 66 66 66
36544- 38 38 38 18 18 18 6 6 6 0 0 0
36545- 0 0 0 0 0 0 0 0 0 0 0 0
36546- 0 0 0 0 0 0 0 0 0 0 0 0
36547- 0 0 0 0 0 0 0 0 0 0 0 0
36548- 0 0 0 0 0 0 0 0 0 0 0 0
36549- 0 0 0 0 0 0 0 0 0 0 0 0
36550- 0 0 0 0 0 0 0 0 0 0 0 0
36551- 0 0 0 0 0 0 0 0 0 0 0 0
36552- 0 0 0 0 0 0 0 0 0 0 0 0
36553- 0 0 0 0 0 0 0 0 0 0 0 0
36554- 0 0 0 0 0 0 0 0 0 0 0 0
36555- 0 0 0 0 0 0 0 0 0 0 0 0
36556- 0 0 0 0 0 0 0 0 0 0 0 0
36557- 0 0 0 0 0 0 0 0 0 0 0 0
36558- 0 0 0 0 0 0 0 0 0 0 0 0
36559- 0 0 0 6 6 6 22 22 22 50 50 50
36560- 78 78 78 34 34 34 2 2 6 2 2 6
36561- 2 2 6 2 2 6 2 2 6 2 2 6
36562- 2 2 6 2 2 6 2 2 6 2 2 6
36563- 2 2 6 2 2 6 6 6 6 70 70 70
36564- 78 78 78 46 46 46 22 22 22 6 6 6
36565- 0 0 0 0 0 0 0 0 0 0 0 0
36566- 0 0 0 0 0 0 0 0 0 0 0 0
36567- 0 0 0 0 0 0 0 0 0 0 0 0
36568- 0 0 0 0 0 0 0 0 0 0 0 0
36569- 0 0 0 0 0 0 0 0 0 0 0 0
36570- 0 0 0 0 0 0 0 0 0 0 0 0
36571- 0 0 0 0 0 0 0 0 0 0 0 0
36572- 0 0 0 0 0 0 0 0 0 0 0 0
36573- 0 0 1 0 0 1 0 0 1 0 0 0
36574- 0 0 0 0 0 0 0 0 0 0 0 0
36575- 0 0 0 0 0 0 0 0 0 0 0 0
36576- 0 0 0 0 0 0 0 0 0 0 0 0
36577- 0 0 0 0 0 0 0 0 0 0 0 0
36578- 0 0 0 0 0 0 0 0 0 0 0 0
36579- 6 6 6 18 18 18 42 42 42 82 82 82
36580- 26 26 26 2 2 6 2 2 6 2 2 6
36581- 2 2 6 2 2 6 2 2 6 2 2 6
36582- 2 2 6 2 2 6 2 2 6 14 14 14
36583- 46 46 46 34 34 34 6 6 6 2 2 6
36584- 42 42 42 78 78 78 42 42 42 18 18 18
36585- 6 6 6 0 0 0 0 0 0 0 0 0
36586- 0 0 0 0 0 0 0 0 0 0 0 0
36587- 0 0 0 0 0 0 0 0 0 0 0 0
36588- 0 0 0 0 0 0 0 0 0 0 0 0
36589- 0 0 0 0 0 0 0 0 0 0 0 0
36590- 0 0 0 0 0 0 0 0 0 0 0 0
36591- 0 0 0 0 0 0 0 0 0 0 0 0
36592- 0 0 0 0 0 0 0 0 0 0 0 0
36593- 0 0 1 0 0 0 0 0 1 0 0 0
36594- 0 0 0 0 0 0 0 0 0 0 0 0
36595- 0 0 0 0 0 0 0 0 0 0 0 0
36596- 0 0 0 0 0 0 0 0 0 0 0 0
36597- 0 0 0 0 0 0 0 0 0 0 0 0
36598- 0 0 0 0 0 0 0 0 0 0 0 0
36599- 10 10 10 30 30 30 66 66 66 58 58 58
36600- 2 2 6 2 2 6 2 2 6 2 2 6
36601- 2 2 6 2 2 6 2 2 6 2 2 6
36602- 2 2 6 2 2 6 2 2 6 26 26 26
36603- 86 86 86 101 101 101 46 46 46 10 10 10
36604- 2 2 6 58 58 58 70 70 70 34 34 34
36605- 10 10 10 0 0 0 0 0 0 0 0 0
36606- 0 0 0 0 0 0 0 0 0 0 0 0
36607- 0 0 0 0 0 0 0 0 0 0 0 0
36608- 0 0 0 0 0 0 0 0 0 0 0 0
36609- 0 0 0 0 0 0 0 0 0 0 0 0
36610- 0 0 0 0 0 0 0 0 0 0 0 0
36611- 0 0 0 0 0 0 0 0 0 0 0 0
36612- 0 0 0 0 0 0 0 0 0 0 0 0
36613- 0 0 1 0 0 1 0 0 1 0 0 0
36614- 0 0 0 0 0 0 0 0 0 0 0 0
36615- 0 0 0 0 0 0 0 0 0 0 0 0
36616- 0 0 0 0 0 0 0 0 0 0 0 0
36617- 0 0 0 0 0 0 0 0 0 0 0 0
36618- 0 0 0 0 0 0 0 0 0 0 0 0
36619- 14 14 14 42 42 42 86 86 86 10 10 10
36620- 2 2 6 2 2 6 2 2 6 2 2 6
36621- 2 2 6 2 2 6 2 2 6 2 2 6
36622- 2 2 6 2 2 6 2 2 6 30 30 30
36623- 94 94 94 94 94 94 58 58 58 26 26 26
36624- 2 2 6 6 6 6 78 78 78 54 54 54
36625- 22 22 22 6 6 6 0 0 0 0 0 0
36626- 0 0 0 0 0 0 0 0 0 0 0 0
36627- 0 0 0 0 0 0 0 0 0 0 0 0
36628- 0 0 0 0 0 0 0 0 0 0 0 0
36629- 0 0 0 0 0 0 0 0 0 0 0 0
36630- 0 0 0 0 0 0 0 0 0 0 0 0
36631- 0 0 0 0 0 0 0 0 0 0 0 0
36632- 0 0 0 0 0 0 0 0 0 0 0 0
36633- 0 0 0 0 0 0 0 0 0 0 0 0
36634- 0 0 0 0 0 0 0 0 0 0 0 0
36635- 0 0 0 0 0 0 0 0 0 0 0 0
36636- 0 0 0 0 0 0 0 0 0 0 0 0
36637- 0 0 0 0 0 0 0 0 0 0 0 0
36638- 0 0 0 0 0 0 0 0 0 6 6 6
36639- 22 22 22 62 62 62 62 62 62 2 2 6
36640- 2 2 6 2 2 6 2 2 6 2 2 6
36641- 2 2 6 2 2 6 2 2 6 2 2 6
36642- 2 2 6 2 2 6 2 2 6 26 26 26
36643- 54 54 54 38 38 38 18 18 18 10 10 10
36644- 2 2 6 2 2 6 34 34 34 82 82 82
36645- 38 38 38 14 14 14 0 0 0 0 0 0
36646- 0 0 0 0 0 0 0 0 0 0 0 0
36647- 0 0 0 0 0 0 0 0 0 0 0 0
36648- 0 0 0 0 0 0 0 0 0 0 0 0
36649- 0 0 0 0 0 0 0 0 0 0 0 0
36650- 0 0 0 0 0 0 0 0 0 0 0 0
36651- 0 0 0 0 0 0 0 0 0 0 0 0
36652- 0 0 0 0 0 0 0 0 0 0 0 0
36653- 0 0 0 0 0 1 0 0 1 0 0 0
36654- 0 0 0 0 0 0 0 0 0 0 0 0
36655- 0 0 0 0 0 0 0 0 0 0 0 0
36656- 0 0 0 0 0 0 0 0 0 0 0 0
36657- 0 0 0 0 0 0 0 0 0 0 0 0
36658- 0 0 0 0 0 0 0 0 0 6 6 6
36659- 30 30 30 78 78 78 30 30 30 2 2 6
36660- 2 2 6 2 2 6 2 2 6 2 2 6
36661- 2 2 6 2 2 6 2 2 6 2 2 6
36662- 2 2 6 2 2 6 2 2 6 10 10 10
36663- 10 10 10 2 2 6 2 2 6 2 2 6
36664- 2 2 6 2 2 6 2 2 6 78 78 78
36665- 50 50 50 18 18 18 6 6 6 0 0 0
36666- 0 0 0 0 0 0 0 0 0 0 0 0
36667- 0 0 0 0 0 0 0 0 0 0 0 0
36668- 0 0 0 0 0 0 0 0 0 0 0 0
36669- 0 0 0 0 0 0 0 0 0 0 0 0
36670- 0 0 0 0 0 0 0 0 0 0 0 0
36671- 0 0 0 0 0 0 0 0 0 0 0 0
36672- 0 0 0 0 0 0 0 0 0 0 0 0
36673- 0 0 1 0 0 0 0 0 0 0 0 0
36674- 0 0 0 0 0 0 0 0 0 0 0 0
36675- 0 0 0 0 0 0 0 0 0 0 0 0
36676- 0 0 0 0 0 0 0 0 0 0 0 0
36677- 0 0 0 0 0 0 0 0 0 0 0 0
36678- 0 0 0 0 0 0 0 0 0 10 10 10
36679- 38 38 38 86 86 86 14 14 14 2 2 6
36680- 2 2 6 2 2 6 2 2 6 2 2 6
36681- 2 2 6 2 2 6 2 2 6 2 2 6
36682- 2 2 6 2 2 6 2 2 6 2 2 6
36683- 2 2 6 2 2 6 2 2 6 2 2 6
36684- 2 2 6 2 2 6 2 2 6 54 54 54
36685- 66 66 66 26 26 26 6 6 6 0 0 0
36686- 0 0 0 0 0 0 0 0 0 0 0 0
36687- 0 0 0 0 0 0 0 0 0 0 0 0
36688- 0 0 0 0 0 0 0 0 0 0 0 0
36689- 0 0 0 0 0 0 0 0 0 0 0 0
36690- 0 0 0 0 0 0 0 0 0 0 0 0
36691- 0 0 0 0 0 0 0 0 0 0 0 0
36692- 0 0 0 0 0 0 0 0 0 0 0 0
36693- 0 0 0 0 0 1 0 0 1 0 0 0
36694- 0 0 0 0 0 0 0 0 0 0 0 0
36695- 0 0 0 0 0 0 0 0 0 0 0 0
36696- 0 0 0 0 0 0 0 0 0 0 0 0
36697- 0 0 0 0 0 0 0 0 0 0 0 0
36698- 0 0 0 0 0 0 0 0 0 14 14 14
36699- 42 42 42 82 82 82 2 2 6 2 2 6
36700- 2 2 6 6 6 6 10 10 10 2 2 6
36701- 2 2 6 2 2 6 2 2 6 2 2 6
36702- 2 2 6 2 2 6 2 2 6 6 6 6
36703- 14 14 14 10 10 10 2 2 6 2 2 6
36704- 2 2 6 2 2 6 2 2 6 18 18 18
36705- 82 82 82 34 34 34 10 10 10 0 0 0
36706- 0 0 0 0 0 0 0 0 0 0 0 0
36707- 0 0 0 0 0 0 0 0 0 0 0 0
36708- 0 0 0 0 0 0 0 0 0 0 0 0
36709- 0 0 0 0 0 0 0 0 0 0 0 0
36710- 0 0 0 0 0 0 0 0 0 0 0 0
36711- 0 0 0 0 0 0 0 0 0 0 0 0
36712- 0 0 0 0 0 0 0 0 0 0 0 0
36713- 0 0 1 0 0 0 0 0 0 0 0 0
36714- 0 0 0 0 0 0 0 0 0 0 0 0
36715- 0 0 0 0 0 0 0 0 0 0 0 0
36716- 0 0 0 0 0 0 0 0 0 0 0 0
36717- 0 0 0 0 0 0 0 0 0 0 0 0
36718- 0 0 0 0 0 0 0 0 0 14 14 14
36719- 46 46 46 86 86 86 2 2 6 2 2 6
36720- 6 6 6 6 6 6 22 22 22 34 34 34
36721- 6 6 6 2 2 6 2 2 6 2 2 6
36722- 2 2 6 2 2 6 18 18 18 34 34 34
36723- 10 10 10 50 50 50 22 22 22 2 2 6
36724- 2 2 6 2 2 6 2 2 6 10 10 10
36725- 86 86 86 42 42 42 14 14 14 0 0 0
36726- 0 0 0 0 0 0 0 0 0 0 0 0
36727- 0 0 0 0 0 0 0 0 0 0 0 0
36728- 0 0 0 0 0 0 0 0 0 0 0 0
36729- 0 0 0 0 0 0 0 0 0 0 0 0
36730- 0 0 0 0 0 0 0 0 0 0 0 0
36731- 0 0 0 0 0 0 0 0 0 0 0 0
36732- 0 0 0 0 0 0 0 0 0 0 0 0
36733- 0 0 1 0 0 1 0 0 1 0 0 0
36734- 0 0 0 0 0 0 0 0 0 0 0 0
36735- 0 0 0 0 0 0 0 0 0 0 0 0
36736- 0 0 0 0 0 0 0 0 0 0 0 0
36737- 0 0 0 0 0 0 0 0 0 0 0 0
36738- 0 0 0 0 0 0 0 0 0 14 14 14
36739- 46 46 46 86 86 86 2 2 6 2 2 6
36740- 38 38 38 116 116 116 94 94 94 22 22 22
36741- 22 22 22 2 2 6 2 2 6 2 2 6
36742- 14 14 14 86 86 86 138 138 138 162 162 162
36743-154 154 154 38 38 38 26 26 26 6 6 6
36744- 2 2 6 2 2 6 2 2 6 2 2 6
36745- 86 86 86 46 46 46 14 14 14 0 0 0
36746- 0 0 0 0 0 0 0 0 0 0 0 0
36747- 0 0 0 0 0 0 0 0 0 0 0 0
36748- 0 0 0 0 0 0 0 0 0 0 0 0
36749- 0 0 0 0 0 0 0 0 0 0 0 0
36750- 0 0 0 0 0 0 0 0 0 0 0 0
36751- 0 0 0 0 0 0 0 0 0 0 0 0
36752- 0 0 0 0 0 0 0 0 0 0 0 0
36753- 0 0 0 0 0 0 0 0 0 0 0 0
36754- 0 0 0 0 0 0 0 0 0 0 0 0
36755- 0 0 0 0 0 0 0 0 0 0 0 0
36756- 0 0 0 0 0 0 0 0 0 0 0 0
36757- 0 0 0 0 0 0 0 0 0 0 0 0
36758- 0 0 0 0 0 0 0 0 0 14 14 14
36759- 46 46 46 86 86 86 2 2 6 14 14 14
36760-134 134 134 198 198 198 195 195 195 116 116 116
36761- 10 10 10 2 2 6 2 2 6 6 6 6
36762-101 98 89 187 187 187 210 210 210 218 218 218
36763-214 214 214 134 134 134 14 14 14 6 6 6
36764- 2 2 6 2 2 6 2 2 6 2 2 6
36765- 86 86 86 50 50 50 18 18 18 6 6 6
36766- 0 0 0 0 0 0 0 0 0 0 0 0
36767- 0 0 0 0 0 0 0 0 0 0 0 0
36768- 0 0 0 0 0 0 0 0 0 0 0 0
36769- 0 0 0 0 0 0 0 0 0 0 0 0
36770- 0 0 0 0 0 0 0 0 0 0 0 0
36771- 0 0 0 0 0 0 0 0 0 0 0 0
36772- 0 0 0 0 0 0 0 0 1 0 0 0
36773- 0 0 1 0 0 1 0 0 1 0 0 0
36774- 0 0 0 0 0 0 0 0 0 0 0 0
36775- 0 0 0 0 0 0 0 0 0 0 0 0
36776- 0 0 0 0 0 0 0 0 0 0 0 0
36777- 0 0 0 0 0 0 0 0 0 0 0 0
36778- 0 0 0 0 0 0 0 0 0 14 14 14
36779- 46 46 46 86 86 86 2 2 6 54 54 54
36780-218 218 218 195 195 195 226 226 226 246 246 246
36781- 58 58 58 2 2 6 2 2 6 30 30 30
36782-210 210 210 253 253 253 174 174 174 123 123 123
36783-221 221 221 234 234 234 74 74 74 2 2 6
36784- 2 2 6 2 2 6 2 2 6 2 2 6
36785- 70 70 70 58 58 58 22 22 22 6 6 6
36786- 0 0 0 0 0 0 0 0 0 0 0 0
36787- 0 0 0 0 0 0 0 0 0 0 0 0
36788- 0 0 0 0 0 0 0 0 0 0 0 0
36789- 0 0 0 0 0 0 0 0 0 0 0 0
36790- 0 0 0 0 0 0 0 0 0 0 0 0
36791- 0 0 0 0 0 0 0 0 0 0 0 0
36792- 0 0 0 0 0 0 0 0 0 0 0 0
36793- 0 0 0 0 0 0 0 0 0 0 0 0
36794- 0 0 0 0 0 0 0 0 0 0 0 0
36795- 0 0 0 0 0 0 0 0 0 0 0 0
36796- 0 0 0 0 0 0 0 0 0 0 0 0
36797- 0 0 0 0 0 0 0 0 0 0 0 0
36798- 0 0 0 0 0 0 0 0 0 14 14 14
36799- 46 46 46 82 82 82 2 2 6 106 106 106
36800-170 170 170 26 26 26 86 86 86 226 226 226
36801-123 123 123 10 10 10 14 14 14 46 46 46
36802-231 231 231 190 190 190 6 6 6 70 70 70
36803- 90 90 90 238 238 238 158 158 158 2 2 6
36804- 2 2 6 2 2 6 2 2 6 2 2 6
36805- 70 70 70 58 58 58 22 22 22 6 6 6
36806- 0 0 0 0 0 0 0 0 0 0 0 0
36807- 0 0 0 0 0 0 0 0 0 0 0 0
36808- 0 0 0 0 0 0 0 0 0 0 0 0
36809- 0 0 0 0 0 0 0 0 0 0 0 0
36810- 0 0 0 0 0 0 0 0 0 0 0 0
36811- 0 0 0 0 0 0 0 0 0 0 0 0
36812- 0 0 0 0 0 0 0 0 1 0 0 0
36813- 0 0 1 0 0 1 0 0 1 0 0 0
36814- 0 0 0 0 0 0 0 0 0 0 0 0
36815- 0 0 0 0 0 0 0 0 0 0 0 0
36816- 0 0 0 0 0 0 0 0 0 0 0 0
36817- 0 0 0 0 0 0 0 0 0 0 0 0
36818- 0 0 0 0 0 0 0 0 0 14 14 14
36819- 42 42 42 86 86 86 6 6 6 116 116 116
36820-106 106 106 6 6 6 70 70 70 149 149 149
36821-128 128 128 18 18 18 38 38 38 54 54 54
36822-221 221 221 106 106 106 2 2 6 14 14 14
36823- 46 46 46 190 190 190 198 198 198 2 2 6
36824- 2 2 6 2 2 6 2 2 6 2 2 6
36825- 74 74 74 62 62 62 22 22 22 6 6 6
36826- 0 0 0 0 0 0 0 0 0 0 0 0
36827- 0 0 0 0 0 0 0 0 0 0 0 0
36828- 0 0 0 0 0 0 0 0 0 0 0 0
36829- 0 0 0 0 0 0 0 0 0 0 0 0
36830- 0 0 0 0 0 0 0 0 0 0 0 0
36831- 0 0 0 0 0 0 0 0 0 0 0 0
36832- 0 0 0 0 0 0 0 0 1 0 0 0
36833- 0 0 1 0 0 0 0 0 1 0 0 0
36834- 0 0 0 0 0 0 0 0 0 0 0 0
36835- 0 0 0 0 0 0 0 0 0 0 0 0
36836- 0 0 0 0 0 0 0 0 0 0 0 0
36837- 0 0 0 0 0 0 0 0 0 0 0 0
36838- 0 0 0 0 0 0 0 0 0 14 14 14
36839- 42 42 42 94 94 94 14 14 14 101 101 101
36840-128 128 128 2 2 6 18 18 18 116 116 116
36841-118 98 46 121 92 8 121 92 8 98 78 10
36842-162 162 162 106 106 106 2 2 6 2 2 6
36843- 2 2 6 195 195 195 195 195 195 6 6 6
36844- 2 2 6 2 2 6 2 2 6 2 2 6
36845- 74 74 74 62 62 62 22 22 22 6 6 6
36846- 0 0 0 0 0 0 0 0 0 0 0 0
36847- 0 0 0 0 0 0 0 0 0 0 0 0
36848- 0 0 0 0 0 0 0 0 0 0 0 0
36849- 0 0 0 0 0 0 0 0 0 0 0 0
36850- 0 0 0 0 0 0 0 0 0 0 0 0
36851- 0 0 0 0 0 0 0 0 0 0 0 0
36852- 0 0 0 0 0 0 0 0 1 0 0 1
36853- 0 0 1 0 0 0 0 0 1 0 0 0
36854- 0 0 0 0 0 0 0 0 0 0 0 0
36855- 0 0 0 0 0 0 0 0 0 0 0 0
36856- 0 0 0 0 0 0 0 0 0 0 0 0
36857- 0 0 0 0 0 0 0 0 0 0 0 0
36858- 0 0 0 0 0 0 0 0 0 10 10 10
36859- 38 38 38 90 90 90 14 14 14 58 58 58
36860-210 210 210 26 26 26 54 38 6 154 114 10
36861-226 170 11 236 186 11 225 175 15 184 144 12
36862-215 174 15 175 146 61 37 26 9 2 2 6
36863- 70 70 70 246 246 246 138 138 138 2 2 6
36864- 2 2 6 2 2 6 2 2 6 2 2 6
36865- 70 70 70 66 66 66 26 26 26 6 6 6
36866- 0 0 0 0 0 0 0 0 0 0 0 0
36867- 0 0 0 0 0 0 0 0 0 0 0 0
36868- 0 0 0 0 0 0 0 0 0 0 0 0
36869- 0 0 0 0 0 0 0 0 0 0 0 0
36870- 0 0 0 0 0 0 0 0 0 0 0 0
36871- 0 0 0 0 0 0 0 0 0 0 0 0
36872- 0 0 0 0 0 0 0 0 0 0 0 0
36873- 0 0 0 0 0 0 0 0 0 0 0 0
36874- 0 0 0 0 0 0 0 0 0 0 0 0
36875- 0 0 0 0 0 0 0 0 0 0 0 0
36876- 0 0 0 0 0 0 0 0 0 0 0 0
36877- 0 0 0 0 0 0 0 0 0 0 0 0
36878- 0 0 0 0 0 0 0 0 0 10 10 10
36879- 38 38 38 86 86 86 14 14 14 10 10 10
36880-195 195 195 188 164 115 192 133 9 225 175 15
36881-239 182 13 234 190 10 232 195 16 232 200 30
36882-245 207 45 241 208 19 232 195 16 184 144 12
36883-218 194 134 211 206 186 42 42 42 2 2 6
36884- 2 2 6 2 2 6 2 2 6 2 2 6
36885- 50 50 50 74 74 74 30 30 30 6 6 6
36886- 0 0 0 0 0 0 0 0 0 0 0 0
36887- 0 0 0 0 0 0 0 0 0 0 0 0
36888- 0 0 0 0 0 0 0 0 0 0 0 0
36889- 0 0 0 0 0 0 0 0 0 0 0 0
36890- 0 0 0 0 0 0 0 0 0 0 0 0
36891- 0 0 0 0 0 0 0 0 0 0 0 0
36892- 0 0 0 0 0 0 0 0 0 0 0 0
36893- 0 0 0 0 0 0 0 0 0 0 0 0
36894- 0 0 0 0 0 0 0 0 0 0 0 0
36895- 0 0 0 0 0 0 0 0 0 0 0 0
36896- 0 0 0 0 0 0 0 0 0 0 0 0
36897- 0 0 0 0 0 0 0 0 0 0 0 0
36898- 0 0 0 0 0 0 0 0 0 10 10 10
36899- 34 34 34 86 86 86 14 14 14 2 2 6
36900-121 87 25 192 133 9 219 162 10 239 182 13
36901-236 186 11 232 195 16 241 208 19 244 214 54
36902-246 218 60 246 218 38 246 215 20 241 208 19
36903-241 208 19 226 184 13 121 87 25 2 2 6
36904- 2 2 6 2 2 6 2 2 6 2 2 6
36905- 50 50 50 82 82 82 34 34 34 10 10 10
36906- 0 0 0 0 0 0 0 0 0 0 0 0
36907- 0 0 0 0 0 0 0 0 0 0 0 0
36908- 0 0 0 0 0 0 0 0 0 0 0 0
36909- 0 0 0 0 0 0 0 0 0 0 0 0
36910- 0 0 0 0 0 0 0 0 0 0 0 0
36911- 0 0 0 0 0 0 0 0 0 0 0 0
36912- 0 0 0 0 0 0 0 0 0 0 0 0
36913- 0 0 0 0 0 0 0 0 0 0 0 0
36914- 0 0 0 0 0 0 0 0 0 0 0 0
36915- 0 0 0 0 0 0 0 0 0 0 0 0
36916- 0 0 0 0 0 0 0 0 0 0 0 0
36917- 0 0 0 0 0 0 0 0 0 0 0 0
36918- 0 0 0 0 0 0 0 0 0 10 10 10
36919- 34 34 34 82 82 82 30 30 30 61 42 6
36920-180 123 7 206 145 10 230 174 11 239 182 13
36921-234 190 10 238 202 15 241 208 19 246 218 74
36922-246 218 38 246 215 20 246 215 20 246 215 20
36923-226 184 13 215 174 15 184 144 12 6 6 6
36924- 2 2 6 2 2 6 2 2 6 2 2 6
36925- 26 26 26 94 94 94 42 42 42 14 14 14
36926- 0 0 0 0 0 0 0 0 0 0 0 0
36927- 0 0 0 0 0 0 0 0 0 0 0 0
36928- 0 0 0 0 0 0 0 0 0 0 0 0
36929- 0 0 0 0 0 0 0 0 0 0 0 0
36930- 0 0 0 0 0 0 0 0 0 0 0 0
36931- 0 0 0 0 0 0 0 0 0 0 0 0
36932- 0 0 0 0 0 0 0 0 0 0 0 0
36933- 0 0 0 0 0 0 0 0 0 0 0 0
36934- 0 0 0 0 0 0 0 0 0 0 0 0
36935- 0 0 0 0 0 0 0 0 0 0 0 0
36936- 0 0 0 0 0 0 0 0 0 0 0 0
36937- 0 0 0 0 0 0 0 0 0 0 0 0
36938- 0 0 0 0 0 0 0 0 0 10 10 10
36939- 30 30 30 78 78 78 50 50 50 104 69 6
36940-192 133 9 216 158 10 236 178 12 236 186 11
36941-232 195 16 241 208 19 244 214 54 245 215 43
36942-246 215 20 246 215 20 241 208 19 198 155 10
36943-200 144 11 216 158 10 156 118 10 2 2 6
36944- 2 2 6 2 2 6 2 2 6 2 2 6
36945- 6 6 6 90 90 90 54 54 54 18 18 18
36946- 6 6 6 0 0 0 0 0 0 0 0 0
36947- 0 0 0 0 0 0 0 0 0 0 0 0
36948- 0 0 0 0 0 0 0 0 0 0 0 0
36949- 0 0 0 0 0 0 0 0 0 0 0 0
36950- 0 0 0 0 0 0 0 0 0 0 0 0
36951- 0 0 0 0 0 0 0 0 0 0 0 0
36952- 0 0 0 0 0 0 0 0 0 0 0 0
36953- 0 0 0 0 0 0 0 0 0 0 0 0
36954- 0 0 0 0 0 0 0 0 0 0 0 0
36955- 0 0 0 0 0 0 0 0 0 0 0 0
36956- 0 0 0 0 0 0 0 0 0 0 0 0
36957- 0 0 0 0 0 0 0 0 0 0 0 0
36958- 0 0 0 0 0 0 0 0 0 10 10 10
36959- 30 30 30 78 78 78 46 46 46 22 22 22
36960-137 92 6 210 162 10 239 182 13 238 190 10
36961-238 202 15 241 208 19 246 215 20 246 215 20
36962-241 208 19 203 166 17 185 133 11 210 150 10
36963-216 158 10 210 150 10 102 78 10 2 2 6
36964- 6 6 6 54 54 54 14 14 14 2 2 6
36965- 2 2 6 62 62 62 74 74 74 30 30 30
36966- 10 10 10 0 0 0 0 0 0 0 0 0
36967- 0 0 0 0 0 0 0 0 0 0 0 0
36968- 0 0 0 0 0 0 0 0 0 0 0 0
36969- 0 0 0 0 0 0 0 0 0 0 0 0
36970- 0 0 0 0 0 0 0 0 0 0 0 0
36971- 0 0 0 0 0 0 0 0 0 0 0 0
36972- 0 0 0 0 0 0 0 0 0 0 0 0
36973- 0 0 0 0 0 0 0 0 0 0 0 0
36974- 0 0 0 0 0 0 0 0 0 0 0 0
36975- 0 0 0 0 0 0 0 0 0 0 0 0
36976- 0 0 0 0 0 0 0 0 0 0 0 0
36977- 0 0 0 0 0 0 0 0 0 0 0 0
36978- 0 0 0 0 0 0 0 0 0 10 10 10
36979- 34 34 34 78 78 78 50 50 50 6 6 6
36980- 94 70 30 139 102 15 190 146 13 226 184 13
36981-232 200 30 232 195 16 215 174 15 190 146 13
36982-168 122 10 192 133 9 210 150 10 213 154 11
36983-202 150 34 182 157 106 101 98 89 2 2 6
36984- 2 2 6 78 78 78 116 116 116 58 58 58
36985- 2 2 6 22 22 22 90 90 90 46 46 46
36986- 18 18 18 6 6 6 0 0 0 0 0 0
36987- 0 0 0 0 0 0 0 0 0 0 0 0
36988- 0 0 0 0 0 0 0 0 0 0 0 0
36989- 0 0 0 0 0 0 0 0 0 0 0 0
36990- 0 0 0 0 0 0 0 0 0 0 0 0
36991- 0 0 0 0 0 0 0 0 0 0 0 0
36992- 0 0 0 0 0 0 0 0 0 0 0 0
36993- 0 0 0 0 0 0 0 0 0 0 0 0
36994- 0 0 0 0 0 0 0 0 0 0 0 0
36995- 0 0 0 0 0 0 0 0 0 0 0 0
36996- 0 0 0 0 0 0 0 0 0 0 0 0
36997- 0 0 0 0 0 0 0 0 0 0 0 0
36998- 0 0 0 0 0 0 0 0 0 10 10 10
36999- 38 38 38 86 86 86 50 50 50 6 6 6
37000-128 128 128 174 154 114 156 107 11 168 122 10
37001-198 155 10 184 144 12 197 138 11 200 144 11
37002-206 145 10 206 145 10 197 138 11 188 164 115
37003-195 195 195 198 198 198 174 174 174 14 14 14
37004- 2 2 6 22 22 22 116 116 116 116 116 116
37005- 22 22 22 2 2 6 74 74 74 70 70 70
37006- 30 30 30 10 10 10 0 0 0 0 0 0
37007- 0 0 0 0 0 0 0 0 0 0 0 0
37008- 0 0 0 0 0 0 0 0 0 0 0 0
37009- 0 0 0 0 0 0 0 0 0 0 0 0
37010- 0 0 0 0 0 0 0 0 0 0 0 0
37011- 0 0 0 0 0 0 0 0 0 0 0 0
37012- 0 0 0 0 0 0 0 0 0 0 0 0
37013- 0 0 0 0 0 0 0 0 0 0 0 0
37014- 0 0 0 0 0 0 0 0 0 0 0 0
37015- 0 0 0 0 0 0 0 0 0 0 0 0
37016- 0 0 0 0 0 0 0 0 0 0 0 0
37017- 0 0 0 0 0 0 0 0 0 0 0 0
37018- 0 0 0 0 0 0 6 6 6 18 18 18
37019- 50 50 50 101 101 101 26 26 26 10 10 10
37020-138 138 138 190 190 190 174 154 114 156 107 11
37021-197 138 11 200 144 11 197 138 11 192 133 9
37022-180 123 7 190 142 34 190 178 144 187 187 187
37023-202 202 202 221 221 221 214 214 214 66 66 66
37024- 2 2 6 2 2 6 50 50 50 62 62 62
37025- 6 6 6 2 2 6 10 10 10 90 90 90
37026- 50 50 50 18 18 18 6 6 6 0 0 0
37027- 0 0 0 0 0 0 0 0 0 0 0 0
37028- 0 0 0 0 0 0 0 0 0 0 0 0
37029- 0 0 0 0 0 0 0 0 0 0 0 0
37030- 0 0 0 0 0 0 0 0 0 0 0 0
37031- 0 0 0 0 0 0 0 0 0 0 0 0
37032- 0 0 0 0 0 0 0 0 0 0 0 0
37033- 0 0 0 0 0 0 0 0 0 0 0 0
37034- 0 0 0 0 0 0 0 0 0 0 0 0
37035- 0 0 0 0 0 0 0 0 0 0 0 0
37036- 0 0 0 0 0 0 0 0 0 0 0 0
37037- 0 0 0 0 0 0 0 0 0 0 0 0
37038- 0 0 0 0 0 0 10 10 10 34 34 34
37039- 74 74 74 74 74 74 2 2 6 6 6 6
37040-144 144 144 198 198 198 190 190 190 178 166 146
37041-154 121 60 156 107 11 156 107 11 168 124 44
37042-174 154 114 187 187 187 190 190 190 210 210 210
37043-246 246 246 253 253 253 253 253 253 182 182 182
37044- 6 6 6 2 2 6 2 2 6 2 2 6
37045- 2 2 6 2 2 6 2 2 6 62 62 62
37046- 74 74 74 34 34 34 14 14 14 0 0 0
37047- 0 0 0 0 0 0 0 0 0 0 0 0
37048- 0 0 0 0 0 0 0 0 0 0 0 0
37049- 0 0 0 0 0 0 0 0 0 0 0 0
37050- 0 0 0 0 0 0 0 0 0 0 0 0
37051- 0 0 0 0 0 0 0 0 0 0 0 0
37052- 0 0 0 0 0 0 0 0 0 0 0 0
37053- 0 0 0 0 0 0 0 0 0 0 0 0
37054- 0 0 0 0 0 0 0 0 0 0 0 0
37055- 0 0 0 0 0 0 0 0 0 0 0 0
37056- 0 0 0 0 0 0 0 0 0 0 0 0
37057- 0 0 0 0 0 0 0 0 0 0 0 0
37058- 0 0 0 10 10 10 22 22 22 54 54 54
37059- 94 94 94 18 18 18 2 2 6 46 46 46
37060-234 234 234 221 221 221 190 190 190 190 190 190
37061-190 190 190 187 187 187 187 187 187 190 190 190
37062-190 190 190 195 195 195 214 214 214 242 242 242
37063-253 253 253 253 253 253 253 253 253 253 253 253
37064- 82 82 82 2 2 6 2 2 6 2 2 6
37065- 2 2 6 2 2 6 2 2 6 14 14 14
37066- 86 86 86 54 54 54 22 22 22 6 6 6
37067- 0 0 0 0 0 0 0 0 0 0 0 0
37068- 0 0 0 0 0 0 0 0 0 0 0 0
37069- 0 0 0 0 0 0 0 0 0 0 0 0
37070- 0 0 0 0 0 0 0 0 0 0 0 0
37071- 0 0 0 0 0 0 0 0 0 0 0 0
37072- 0 0 0 0 0 0 0 0 0 0 0 0
37073- 0 0 0 0 0 0 0 0 0 0 0 0
37074- 0 0 0 0 0 0 0 0 0 0 0 0
37075- 0 0 0 0 0 0 0 0 0 0 0 0
37076- 0 0 0 0 0 0 0 0 0 0 0 0
37077- 0 0 0 0 0 0 0 0 0 0 0 0
37078- 6 6 6 18 18 18 46 46 46 90 90 90
37079- 46 46 46 18 18 18 6 6 6 182 182 182
37080-253 253 253 246 246 246 206 206 206 190 190 190
37081-190 190 190 190 190 190 190 190 190 190 190 190
37082-206 206 206 231 231 231 250 250 250 253 253 253
37083-253 253 253 253 253 253 253 253 253 253 253 253
37084-202 202 202 14 14 14 2 2 6 2 2 6
37085- 2 2 6 2 2 6 2 2 6 2 2 6
37086- 42 42 42 86 86 86 42 42 42 18 18 18
37087- 6 6 6 0 0 0 0 0 0 0 0 0
37088- 0 0 0 0 0 0 0 0 0 0 0 0
37089- 0 0 0 0 0 0 0 0 0 0 0 0
37090- 0 0 0 0 0 0 0 0 0 0 0 0
37091- 0 0 0 0 0 0 0 0 0 0 0 0
37092- 0 0 0 0 0 0 0 0 0 0 0 0
37093- 0 0 0 0 0 0 0 0 0 0 0 0
37094- 0 0 0 0 0 0 0 0 0 0 0 0
37095- 0 0 0 0 0 0 0 0 0 0 0 0
37096- 0 0 0 0 0 0 0 0 0 0 0 0
37097- 0 0 0 0 0 0 0 0 0 6 6 6
37098- 14 14 14 38 38 38 74 74 74 66 66 66
37099- 2 2 6 6 6 6 90 90 90 250 250 250
37100-253 253 253 253 253 253 238 238 238 198 198 198
37101-190 190 190 190 190 190 195 195 195 221 221 221
37102-246 246 246 253 253 253 253 253 253 253 253 253
37103-253 253 253 253 253 253 253 253 253 253 253 253
37104-253 253 253 82 82 82 2 2 6 2 2 6
37105- 2 2 6 2 2 6 2 2 6 2 2 6
37106- 2 2 6 78 78 78 70 70 70 34 34 34
37107- 14 14 14 6 6 6 0 0 0 0 0 0
37108- 0 0 0 0 0 0 0 0 0 0 0 0
37109- 0 0 0 0 0 0 0 0 0 0 0 0
37110- 0 0 0 0 0 0 0 0 0 0 0 0
37111- 0 0 0 0 0 0 0 0 0 0 0 0
37112- 0 0 0 0 0 0 0 0 0 0 0 0
37113- 0 0 0 0 0 0 0 0 0 0 0 0
37114- 0 0 0 0 0 0 0 0 0 0 0 0
37115- 0 0 0 0 0 0 0 0 0 0 0 0
37116- 0 0 0 0 0 0 0 0 0 0 0 0
37117- 0 0 0 0 0 0 0 0 0 14 14 14
37118- 34 34 34 66 66 66 78 78 78 6 6 6
37119- 2 2 6 18 18 18 218 218 218 253 253 253
37120-253 253 253 253 253 253 253 253 253 246 246 246
37121-226 226 226 231 231 231 246 246 246 253 253 253
37122-253 253 253 253 253 253 253 253 253 253 253 253
37123-253 253 253 253 253 253 253 253 253 253 253 253
37124-253 253 253 178 178 178 2 2 6 2 2 6
37125- 2 2 6 2 2 6 2 2 6 2 2 6
37126- 2 2 6 18 18 18 90 90 90 62 62 62
37127- 30 30 30 10 10 10 0 0 0 0 0 0
37128- 0 0 0 0 0 0 0 0 0 0 0 0
37129- 0 0 0 0 0 0 0 0 0 0 0 0
37130- 0 0 0 0 0 0 0 0 0 0 0 0
37131- 0 0 0 0 0 0 0 0 0 0 0 0
37132- 0 0 0 0 0 0 0 0 0 0 0 0
37133- 0 0 0 0 0 0 0 0 0 0 0 0
37134- 0 0 0 0 0 0 0 0 0 0 0 0
37135- 0 0 0 0 0 0 0 0 0 0 0 0
37136- 0 0 0 0 0 0 0 0 0 0 0 0
37137- 0 0 0 0 0 0 10 10 10 26 26 26
37138- 58 58 58 90 90 90 18 18 18 2 2 6
37139- 2 2 6 110 110 110 253 253 253 253 253 253
37140-253 253 253 253 253 253 253 253 253 253 253 253
37141-250 250 250 253 253 253 253 253 253 253 253 253
37142-253 253 253 253 253 253 253 253 253 253 253 253
37143-253 253 253 253 253 253 253 253 253 253 253 253
37144-253 253 253 231 231 231 18 18 18 2 2 6
37145- 2 2 6 2 2 6 2 2 6 2 2 6
37146- 2 2 6 2 2 6 18 18 18 94 94 94
37147- 54 54 54 26 26 26 10 10 10 0 0 0
37148- 0 0 0 0 0 0 0 0 0 0 0 0
37149- 0 0 0 0 0 0 0 0 0 0 0 0
37150- 0 0 0 0 0 0 0 0 0 0 0 0
37151- 0 0 0 0 0 0 0 0 0 0 0 0
37152- 0 0 0 0 0 0 0 0 0 0 0 0
37153- 0 0 0 0 0 0 0 0 0 0 0 0
37154- 0 0 0 0 0 0 0 0 0 0 0 0
37155- 0 0 0 0 0 0 0 0 0 0 0 0
37156- 0 0 0 0 0 0 0 0 0 0 0 0
37157- 0 0 0 6 6 6 22 22 22 50 50 50
37158- 90 90 90 26 26 26 2 2 6 2 2 6
37159- 14 14 14 195 195 195 250 250 250 253 253 253
37160-253 253 253 253 253 253 253 253 253 253 253 253
37161-253 253 253 253 253 253 253 253 253 253 253 253
37162-253 253 253 253 253 253 253 253 253 253 253 253
37163-253 253 253 253 253 253 253 253 253 253 253 253
37164-250 250 250 242 242 242 54 54 54 2 2 6
37165- 2 2 6 2 2 6 2 2 6 2 2 6
37166- 2 2 6 2 2 6 2 2 6 38 38 38
37167- 86 86 86 50 50 50 22 22 22 6 6 6
37168- 0 0 0 0 0 0 0 0 0 0 0 0
37169- 0 0 0 0 0 0 0 0 0 0 0 0
37170- 0 0 0 0 0 0 0 0 0 0 0 0
37171- 0 0 0 0 0 0 0 0 0 0 0 0
37172- 0 0 0 0 0 0 0 0 0 0 0 0
37173- 0 0 0 0 0 0 0 0 0 0 0 0
37174- 0 0 0 0 0 0 0 0 0 0 0 0
37175- 0 0 0 0 0 0 0 0 0 0 0 0
37176- 0 0 0 0 0 0 0 0 0 0 0 0
37177- 6 6 6 14 14 14 38 38 38 82 82 82
37178- 34 34 34 2 2 6 2 2 6 2 2 6
37179- 42 42 42 195 195 195 246 246 246 253 253 253
37180-253 253 253 253 253 253 253 253 253 250 250 250
37181-242 242 242 242 242 242 250 250 250 253 253 253
37182-253 253 253 253 253 253 253 253 253 253 253 253
37183-253 253 253 250 250 250 246 246 246 238 238 238
37184-226 226 226 231 231 231 101 101 101 6 6 6
37185- 2 2 6 2 2 6 2 2 6 2 2 6
37186- 2 2 6 2 2 6 2 2 6 2 2 6
37187- 38 38 38 82 82 82 42 42 42 14 14 14
37188- 6 6 6 0 0 0 0 0 0 0 0 0
37189- 0 0 0 0 0 0 0 0 0 0 0 0
37190- 0 0 0 0 0 0 0 0 0 0 0 0
37191- 0 0 0 0 0 0 0 0 0 0 0 0
37192- 0 0 0 0 0 0 0 0 0 0 0 0
37193- 0 0 0 0 0 0 0 0 0 0 0 0
37194- 0 0 0 0 0 0 0 0 0 0 0 0
37195- 0 0 0 0 0 0 0 0 0 0 0 0
37196- 0 0 0 0 0 0 0 0 0 0 0 0
37197- 10 10 10 26 26 26 62 62 62 66 66 66
37198- 2 2 6 2 2 6 2 2 6 6 6 6
37199- 70 70 70 170 170 170 206 206 206 234 234 234
37200-246 246 246 250 250 250 250 250 250 238 238 238
37201-226 226 226 231 231 231 238 238 238 250 250 250
37202-250 250 250 250 250 250 246 246 246 231 231 231
37203-214 214 214 206 206 206 202 202 202 202 202 202
37204-198 198 198 202 202 202 182 182 182 18 18 18
37205- 2 2 6 2 2 6 2 2 6 2 2 6
37206- 2 2 6 2 2 6 2 2 6 2 2 6
37207- 2 2 6 62 62 62 66 66 66 30 30 30
37208- 10 10 10 0 0 0 0 0 0 0 0 0
37209- 0 0 0 0 0 0 0 0 0 0 0 0
37210- 0 0 0 0 0 0 0 0 0 0 0 0
37211- 0 0 0 0 0 0 0 0 0 0 0 0
37212- 0 0 0 0 0 0 0 0 0 0 0 0
37213- 0 0 0 0 0 0 0 0 0 0 0 0
37214- 0 0 0 0 0 0 0 0 0 0 0 0
37215- 0 0 0 0 0 0 0 0 0 0 0 0
37216- 0 0 0 0 0 0 0 0 0 0 0 0
37217- 14 14 14 42 42 42 82 82 82 18 18 18
37218- 2 2 6 2 2 6 2 2 6 10 10 10
37219- 94 94 94 182 182 182 218 218 218 242 242 242
37220-250 250 250 253 253 253 253 253 253 250 250 250
37221-234 234 234 253 253 253 253 253 253 253 253 253
37222-253 253 253 253 253 253 253 253 253 246 246 246
37223-238 238 238 226 226 226 210 210 210 202 202 202
37224-195 195 195 195 195 195 210 210 210 158 158 158
37225- 6 6 6 14 14 14 50 50 50 14 14 14
37226- 2 2 6 2 2 6 2 2 6 2 2 6
37227- 2 2 6 6 6 6 86 86 86 46 46 46
37228- 18 18 18 6 6 6 0 0 0 0 0 0
37229- 0 0 0 0 0 0 0 0 0 0 0 0
37230- 0 0 0 0 0 0 0 0 0 0 0 0
37231- 0 0 0 0 0 0 0 0 0 0 0 0
37232- 0 0 0 0 0 0 0 0 0 0 0 0
37233- 0 0 0 0 0 0 0 0 0 0 0 0
37234- 0 0 0 0 0 0 0 0 0 0 0 0
37235- 0 0 0 0 0 0 0 0 0 0 0 0
37236- 0 0 0 0 0 0 0 0 0 6 6 6
37237- 22 22 22 54 54 54 70 70 70 2 2 6
37238- 2 2 6 10 10 10 2 2 6 22 22 22
37239-166 166 166 231 231 231 250 250 250 253 253 253
37240-253 253 253 253 253 253 253 253 253 250 250 250
37241-242 242 242 253 253 253 253 253 253 253 253 253
37242-253 253 253 253 253 253 253 253 253 253 253 253
37243-253 253 253 253 253 253 253 253 253 246 246 246
37244-231 231 231 206 206 206 198 198 198 226 226 226
37245- 94 94 94 2 2 6 6 6 6 38 38 38
37246- 30 30 30 2 2 6 2 2 6 2 2 6
37247- 2 2 6 2 2 6 62 62 62 66 66 66
37248- 26 26 26 10 10 10 0 0 0 0 0 0
37249- 0 0 0 0 0 0 0 0 0 0 0 0
37250- 0 0 0 0 0 0 0 0 0 0 0 0
37251- 0 0 0 0 0 0 0 0 0 0 0 0
37252- 0 0 0 0 0 0 0 0 0 0 0 0
37253- 0 0 0 0 0 0 0 0 0 0 0 0
37254- 0 0 0 0 0 0 0 0 0 0 0 0
37255- 0 0 0 0 0 0 0 0 0 0 0 0
37256- 0 0 0 0 0 0 0 0 0 10 10 10
37257- 30 30 30 74 74 74 50 50 50 2 2 6
37258- 26 26 26 26 26 26 2 2 6 106 106 106
37259-238 238 238 253 253 253 253 253 253 253 253 253
37260-253 253 253 253 253 253 253 253 253 253 253 253
37261-253 253 253 253 253 253 253 253 253 253 253 253
37262-253 253 253 253 253 253 253 253 253 253 253 253
37263-253 253 253 253 253 253 253 253 253 253 253 253
37264-253 253 253 246 246 246 218 218 218 202 202 202
37265-210 210 210 14 14 14 2 2 6 2 2 6
37266- 30 30 30 22 22 22 2 2 6 2 2 6
37267- 2 2 6 2 2 6 18 18 18 86 86 86
37268- 42 42 42 14 14 14 0 0 0 0 0 0
37269- 0 0 0 0 0 0 0 0 0 0 0 0
37270- 0 0 0 0 0 0 0 0 0 0 0 0
37271- 0 0 0 0 0 0 0 0 0 0 0 0
37272- 0 0 0 0 0 0 0 0 0 0 0 0
37273- 0 0 0 0 0 0 0 0 0 0 0 0
37274- 0 0 0 0 0 0 0 0 0 0 0 0
37275- 0 0 0 0 0 0 0 0 0 0 0 0
37276- 0 0 0 0 0 0 0 0 0 14 14 14
37277- 42 42 42 90 90 90 22 22 22 2 2 6
37278- 42 42 42 2 2 6 18 18 18 218 218 218
37279-253 253 253 253 253 253 253 253 253 253 253 253
37280-253 253 253 253 253 253 253 253 253 253 253 253
37281-253 253 253 253 253 253 253 253 253 253 253 253
37282-253 253 253 253 253 253 253 253 253 253 253 253
37283-253 253 253 253 253 253 253 253 253 253 253 253
37284-253 253 253 253 253 253 250 250 250 221 221 221
37285-218 218 218 101 101 101 2 2 6 14 14 14
37286- 18 18 18 38 38 38 10 10 10 2 2 6
37287- 2 2 6 2 2 6 2 2 6 78 78 78
37288- 58 58 58 22 22 22 6 6 6 0 0 0
37289- 0 0 0 0 0 0 0 0 0 0 0 0
37290- 0 0 0 0 0 0 0 0 0 0 0 0
37291- 0 0 0 0 0 0 0 0 0 0 0 0
37292- 0 0 0 0 0 0 0 0 0 0 0 0
37293- 0 0 0 0 0 0 0 0 0 0 0 0
37294- 0 0 0 0 0 0 0 0 0 0 0 0
37295- 0 0 0 0 0 0 0 0 0 0 0 0
37296- 0 0 0 0 0 0 6 6 6 18 18 18
37297- 54 54 54 82 82 82 2 2 6 26 26 26
37298- 22 22 22 2 2 6 123 123 123 253 253 253
37299-253 253 253 253 253 253 253 253 253 253 253 253
37300-253 253 253 253 253 253 253 253 253 253 253 253
37301-253 253 253 253 253 253 253 253 253 253 253 253
37302-253 253 253 253 253 253 253 253 253 253 253 253
37303-253 253 253 253 253 253 253 253 253 253 253 253
37304-253 253 253 253 253 253 253 253 253 250 250 250
37305-238 238 238 198 198 198 6 6 6 38 38 38
37306- 58 58 58 26 26 26 38 38 38 2 2 6
37307- 2 2 6 2 2 6 2 2 6 46 46 46
37308- 78 78 78 30 30 30 10 10 10 0 0 0
37309- 0 0 0 0 0 0 0 0 0 0 0 0
37310- 0 0 0 0 0 0 0 0 0 0 0 0
37311- 0 0 0 0 0 0 0 0 0 0 0 0
37312- 0 0 0 0 0 0 0 0 0 0 0 0
37313- 0 0 0 0 0 0 0 0 0 0 0 0
37314- 0 0 0 0 0 0 0 0 0 0 0 0
37315- 0 0 0 0 0 0 0 0 0 0 0 0
37316- 0 0 0 0 0 0 10 10 10 30 30 30
37317- 74 74 74 58 58 58 2 2 6 42 42 42
37318- 2 2 6 22 22 22 231 231 231 253 253 253
37319-253 253 253 253 253 253 253 253 253 253 253 253
37320-253 253 253 253 253 253 253 253 253 250 250 250
37321-253 253 253 253 253 253 253 253 253 253 253 253
37322-253 253 253 253 253 253 253 253 253 253 253 253
37323-253 253 253 253 253 253 253 253 253 253 253 253
37324-253 253 253 253 253 253 253 253 253 253 253 253
37325-253 253 253 246 246 246 46 46 46 38 38 38
37326- 42 42 42 14 14 14 38 38 38 14 14 14
37327- 2 2 6 2 2 6 2 2 6 6 6 6
37328- 86 86 86 46 46 46 14 14 14 0 0 0
37329- 0 0 0 0 0 0 0 0 0 0 0 0
37330- 0 0 0 0 0 0 0 0 0 0 0 0
37331- 0 0 0 0 0 0 0 0 0 0 0 0
37332- 0 0 0 0 0 0 0 0 0 0 0 0
37333- 0 0 0 0 0 0 0 0 0 0 0 0
37334- 0 0 0 0 0 0 0 0 0 0 0 0
37335- 0 0 0 0 0 0 0 0 0 0 0 0
37336- 0 0 0 6 6 6 14 14 14 42 42 42
37337- 90 90 90 18 18 18 18 18 18 26 26 26
37338- 2 2 6 116 116 116 253 253 253 253 253 253
37339-253 253 253 253 253 253 253 253 253 253 253 253
37340-253 253 253 253 253 253 250 250 250 238 238 238
37341-253 253 253 253 253 253 253 253 253 253 253 253
37342-253 253 253 253 253 253 253 253 253 253 253 253
37343-253 253 253 253 253 253 253 253 253 253 253 253
37344-253 253 253 253 253 253 253 253 253 253 253 253
37345-253 253 253 253 253 253 94 94 94 6 6 6
37346- 2 2 6 2 2 6 10 10 10 34 34 34
37347- 2 2 6 2 2 6 2 2 6 2 2 6
37348- 74 74 74 58 58 58 22 22 22 6 6 6
37349- 0 0 0 0 0 0 0 0 0 0 0 0
37350- 0 0 0 0 0 0 0 0 0 0 0 0
37351- 0 0 0 0 0 0 0 0 0 0 0 0
37352- 0 0 0 0 0 0 0 0 0 0 0 0
37353- 0 0 0 0 0 0 0 0 0 0 0 0
37354- 0 0 0 0 0 0 0 0 0 0 0 0
37355- 0 0 0 0 0 0 0 0 0 0 0 0
37356- 0 0 0 10 10 10 26 26 26 66 66 66
37357- 82 82 82 2 2 6 38 38 38 6 6 6
37358- 14 14 14 210 210 210 253 253 253 253 253 253
37359-253 253 253 253 253 253 253 253 253 253 253 253
37360-253 253 253 253 253 253 246 246 246 242 242 242
37361-253 253 253 253 253 253 253 253 253 253 253 253
37362-253 253 253 253 253 253 253 253 253 253 253 253
37363-253 253 253 253 253 253 253 253 253 253 253 253
37364-253 253 253 253 253 253 253 253 253 253 253 253
37365-253 253 253 253 253 253 144 144 144 2 2 6
37366- 2 2 6 2 2 6 2 2 6 46 46 46
37367- 2 2 6 2 2 6 2 2 6 2 2 6
37368- 42 42 42 74 74 74 30 30 30 10 10 10
37369- 0 0 0 0 0 0 0 0 0 0 0 0
37370- 0 0 0 0 0 0 0 0 0 0 0 0
37371- 0 0 0 0 0 0 0 0 0 0 0 0
37372- 0 0 0 0 0 0 0 0 0 0 0 0
37373- 0 0 0 0 0 0 0 0 0 0 0 0
37374- 0 0 0 0 0 0 0 0 0 0 0 0
37375- 0 0 0 0 0 0 0 0 0 0 0 0
37376- 6 6 6 14 14 14 42 42 42 90 90 90
37377- 26 26 26 6 6 6 42 42 42 2 2 6
37378- 74 74 74 250 250 250 253 253 253 253 253 253
37379-253 253 253 253 253 253 253 253 253 253 253 253
37380-253 253 253 253 253 253 242 242 242 242 242 242
37381-253 253 253 253 253 253 253 253 253 253 253 253
37382-253 253 253 253 253 253 253 253 253 253 253 253
37383-253 253 253 253 253 253 253 253 253 253 253 253
37384-253 253 253 253 253 253 253 253 253 253 253 253
37385-253 253 253 253 253 253 182 182 182 2 2 6
37386- 2 2 6 2 2 6 2 2 6 46 46 46
37387- 2 2 6 2 2 6 2 2 6 2 2 6
37388- 10 10 10 86 86 86 38 38 38 10 10 10
37389- 0 0 0 0 0 0 0 0 0 0 0 0
37390- 0 0 0 0 0 0 0 0 0 0 0 0
37391- 0 0 0 0 0 0 0 0 0 0 0 0
37392- 0 0 0 0 0 0 0 0 0 0 0 0
37393- 0 0 0 0 0 0 0 0 0 0 0 0
37394- 0 0 0 0 0 0 0 0 0 0 0 0
37395- 0 0 0 0 0 0 0 0 0 0 0 0
37396- 10 10 10 26 26 26 66 66 66 82 82 82
37397- 2 2 6 22 22 22 18 18 18 2 2 6
37398-149 149 149 253 253 253 253 253 253 253 253 253
37399-253 253 253 253 253 253 253 253 253 253 253 253
37400-253 253 253 253 253 253 234 234 234 242 242 242
37401-253 253 253 253 253 253 253 253 253 253 253 253
37402-253 253 253 253 253 253 253 253 253 253 253 253
37403-253 253 253 253 253 253 253 253 253 253 253 253
37404-253 253 253 253 253 253 253 253 253 253 253 253
37405-253 253 253 253 253 253 206 206 206 2 2 6
37406- 2 2 6 2 2 6 2 2 6 38 38 38
37407- 2 2 6 2 2 6 2 2 6 2 2 6
37408- 6 6 6 86 86 86 46 46 46 14 14 14
37409- 0 0 0 0 0 0 0 0 0 0 0 0
37410- 0 0 0 0 0 0 0 0 0 0 0 0
37411- 0 0 0 0 0 0 0 0 0 0 0 0
37412- 0 0 0 0 0 0 0 0 0 0 0 0
37413- 0 0 0 0 0 0 0 0 0 0 0 0
37414- 0 0 0 0 0 0 0 0 0 0 0 0
37415- 0 0 0 0 0 0 0 0 0 6 6 6
37416- 18 18 18 46 46 46 86 86 86 18 18 18
37417- 2 2 6 34 34 34 10 10 10 6 6 6
37418-210 210 210 253 253 253 253 253 253 253 253 253
37419-253 253 253 253 253 253 253 253 253 253 253 253
37420-253 253 253 253 253 253 234 234 234 242 242 242
37421-253 253 253 253 253 253 253 253 253 253 253 253
37422-253 253 253 253 253 253 253 253 253 253 253 253
37423-253 253 253 253 253 253 253 253 253 253 253 253
37424-253 253 253 253 253 253 253 253 253 253 253 253
37425-253 253 253 253 253 253 221 221 221 6 6 6
37426- 2 2 6 2 2 6 6 6 6 30 30 30
37427- 2 2 6 2 2 6 2 2 6 2 2 6
37428- 2 2 6 82 82 82 54 54 54 18 18 18
37429- 6 6 6 0 0 0 0 0 0 0 0 0
37430- 0 0 0 0 0 0 0 0 0 0 0 0
37431- 0 0 0 0 0 0 0 0 0 0 0 0
37432- 0 0 0 0 0 0 0 0 0 0 0 0
37433- 0 0 0 0 0 0 0 0 0 0 0 0
37434- 0 0 0 0 0 0 0 0 0 0 0 0
37435- 0 0 0 0 0 0 0 0 0 10 10 10
37436- 26 26 26 66 66 66 62 62 62 2 2 6
37437- 2 2 6 38 38 38 10 10 10 26 26 26
37438-238 238 238 253 253 253 253 253 253 253 253 253
37439-253 253 253 253 253 253 253 253 253 253 253 253
37440-253 253 253 253 253 253 231 231 231 238 238 238
37441-253 253 253 253 253 253 253 253 253 253 253 253
37442-253 253 253 253 253 253 253 253 253 253 253 253
37443-253 253 253 253 253 253 253 253 253 253 253 253
37444-253 253 253 253 253 253 253 253 253 253 253 253
37445-253 253 253 253 253 253 231 231 231 6 6 6
37446- 2 2 6 2 2 6 10 10 10 30 30 30
37447- 2 2 6 2 2 6 2 2 6 2 2 6
37448- 2 2 6 66 66 66 58 58 58 22 22 22
37449- 6 6 6 0 0 0 0 0 0 0 0 0
37450- 0 0 0 0 0 0 0 0 0 0 0 0
37451- 0 0 0 0 0 0 0 0 0 0 0 0
37452- 0 0 0 0 0 0 0 0 0 0 0 0
37453- 0 0 0 0 0 0 0 0 0 0 0 0
37454- 0 0 0 0 0 0 0 0 0 0 0 0
37455- 0 0 0 0 0 0 0 0 0 10 10 10
37456- 38 38 38 78 78 78 6 6 6 2 2 6
37457- 2 2 6 46 46 46 14 14 14 42 42 42
37458-246 246 246 253 253 253 253 253 253 253 253 253
37459-253 253 253 253 253 253 253 253 253 253 253 253
37460-253 253 253 253 253 253 231 231 231 242 242 242
37461-253 253 253 253 253 253 253 253 253 253 253 253
37462-253 253 253 253 253 253 253 253 253 253 253 253
37463-253 253 253 253 253 253 253 253 253 253 253 253
37464-253 253 253 253 253 253 253 253 253 253 253 253
37465-253 253 253 253 253 253 234 234 234 10 10 10
37466- 2 2 6 2 2 6 22 22 22 14 14 14
37467- 2 2 6 2 2 6 2 2 6 2 2 6
37468- 2 2 6 66 66 66 62 62 62 22 22 22
37469- 6 6 6 0 0 0 0 0 0 0 0 0
37470- 0 0 0 0 0 0 0 0 0 0 0 0
37471- 0 0 0 0 0 0 0 0 0 0 0 0
37472- 0 0 0 0 0 0 0 0 0 0 0 0
37473- 0 0 0 0 0 0 0 0 0 0 0 0
37474- 0 0 0 0 0 0 0 0 0 0 0 0
37475- 0 0 0 0 0 0 6 6 6 18 18 18
37476- 50 50 50 74 74 74 2 2 6 2 2 6
37477- 14 14 14 70 70 70 34 34 34 62 62 62
37478-250 250 250 253 253 253 253 253 253 253 253 253
37479-253 253 253 253 253 253 253 253 253 253 253 253
37480-253 253 253 253 253 253 231 231 231 246 246 246
37481-253 253 253 253 253 253 253 253 253 253 253 253
37482-253 253 253 253 253 253 253 253 253 253 253 253
37483-253 253 253 253 253 253 253 253 253 253 253 253
37484-253 253 253 253 253 253 253 253 253 253 253 253
37485-253 253 253 253 253 253 234 234 234 14 14 14
37486- 2 2 6 2 2 6 30 30 30 2 2 6
37487- 2 2 6 2 2 6 2 2 6 2 2 6
37488- 2 2 6 66 66 66 62 62 62 22 22 22
37489- 6 6 6 0 0 0 0 0 0 0 0 0
37490- 0 0 0 0 0 0 0 0 0 0 0 0
37491- 0 0 0 0 0 0 0 0 0 0 0 0
37492- 0 0 0 0 0 0 0 0 0 0 0 0
37493- 0 0 0 0 0 0 0 0 0 0 0 0
37494- 0 0 0 0 0 0 0 0 0 0 0 0
37495- 0 0 0 0 0 0 6 6 6 18 18 18
37496- 54 54 54 62 62 62 2 2 6 2 2 6
37497- 2 2 6 30 30 30 46 46 46 70 70 70
37498-250 250 250 253 253 253 253 253 253 253 253 253
37499-253 253 253 253 253 253 253 253 253 253 253 253
37500-253 253 253 253 253 253 231 231 231 246 246 246
37501-253 253 253 253 253 253 253 253 253 253 253 253
37502-253 253 253 253 253 253 253 253 253 253 253 253
37503-253 253 253 253 253 253 253 253 253 253 253 253
37504-253 253 253 253 253 253 253 253 253 253 253 253
37505-253 253 253 253 253 253 226 226 226 10 10 10
37506- 2 2 6 6 6 6 30 30 30 2 2 6
37507- 2 2 6 2 2 6 2 2 6 2 2 6
37508- 2 2 6 66 66 66 58 58 58 22 22 22
37509- 6 6 6 0 0 0 0 0 0 0 0 0
37510- 0 0 0 0 0 0 0 0 0 0 0 0
37511- 0 0 0 0 0 0 0 0 0 0 0 0
37512- 0 0 0 0 0 0 0 0 0 0 0 0
37513- 0 0 0 0 0 0 0 0 0 0 0 0
37514- 0 0 0 0 0 0 0 0 0 0 0 0
37515- 0 0 0 0 0 0 6 6 6 22 22 22
37516- 58 58 58 62 62 62 2 2 6 2 2 6
37517- 2 2 6 2 2 6 30 30 30 78 78 78
37518-250 250 250 253 253 253 253 253 253 253 253 253
37519-253 253 253 253 253 253 253 253 253 253 253 253
37520-253 253 253 253 253 253 231 231 231 246 246 246
37521-253 253 253 253 253 253 253 253 253 253 253 253
37522-253 253 253 253 253 253 253 253 253 253 253 253
37523-253 253 253 253 253 253 253 253 253 253 253 253
37524-253 253 253 253 253 253 253 253 253 253 253 253
37525-253 253 253 253 253 253 206 206 206 2 2 6
37526- 22 22 22 34 34 34 18 14 6 22 22 22
37527- 26 26 26 18 18 18 6 6 6 2 2 6
37528- 2 2 6 82 82 82 54 54 54 18 18 18
37529- 6 6 6 0 0 0 0 0 0 0 0 0
37530- 0 0 0 0 0 0 0 0 0 0 0 0
37531- 0 0 0 0 0 0 0 0 0 0 0 0
37532- 0 0 0 0 0 0 0 0 0 0 0 0
37533- 0 0 0 0 0 0 0 0 0 0 0 0
37534- 0 0 0 0 0 0 0 0 0 0 0 0
37535- 0 0 0 0 0 0 6 6 6 26 26 26
37536- 62 62 62 106 106 106 74 54 14 185 133 11
37537-210 162 10 121 92 8 6 6 6 62 62 62
37538-238 238 238 253 253 253 253 253 253 253 253 253
37539-253 253 253 253 253 253 253 253 253 253 253 253
37540-253 253 253 253 253 253 231 231 231 246 246 246
37541-253 253 253 253 253 253 253 253 253 253 253 253
37542-253 253 253 253 253 253 253 253 253 253 253 253
37543-253 253 253 253 253 253 253 253 253 253 253 253
37544-253 253 253 253 253 253 253 253 253 253 253 253
37545-253 253 253 253 253 253 158 158 158 18 18 18
37546- 14 14 14 2 2 6 2 2 6 2 2 6
37547- 6 6 6 18 18 18 66 66 66 38 38 38
37548- 6 6 6 94 94 94 50 50 50 18 18 18
37549- 6 6 6 0 0 0 0 0 0 0 0 0
37550- 0 0 0 0 0 0 0 0 0 0 0 0
37551- 0 0 0 0 0 0 0 0 0 0 0 0
37552- 0 0 0 0 0 0 0 0 0 0 0 0
37553- 0 0 0 0 0 0 0 0 0 0 0 0
37554- 0 0 0 0 0 0 0 0 0 6 6 6
37555- 10 10 10 10 10 10 18 18 18 38 38 38
37556- 78 78 78 142 134 106 216 158 10 242 186 14
37557-246 190 14 246 190 14 156 118 10 10 10 10
37558- 90 90 90 238 238 238 253 253 253 253 253 253
37559-253 253 253 253 253 253 253 253 253 253 253 253
37560-253 253 253 253 253 253 231 231 231 250 250 250
37561-253 253 253 253 253 253 253 253 253 253 253 253
37562-253 253 253 253 253 253 253 253 253 253 253 253
37563-253 253 253 253 253 253 253 253 253 253 253 253
37564-253 253 253 253 253 253 253 253 253 246 230 190
37565-238 204 91 238 204 91 181 142 44 37 26 9
37566- 2 2 6 2 2 6 2 2 6 2 2 6
37567- 2 2 6 2 2 6 38 38 38 46 46 46
37568- 26 26 26 106 106 106 54 54 54 18 18 18
37569- 6 6 6 0 0 0 0 0 0 0 0 0
37570- 0 0 0 0 0 0 0 0 0 0 0 0
37571- 0 0 0 0 0 0 0 0 0 0 0 0
37572- 0 0 0 0 0 0 0 0 0 0 0 0
37573- 0 0 0 0 0 0 0 0 0 0 0 0
37574- 0 0 0 6 6 6 14 14 14 22 22 22
37575- 30 30 30 38 38 38 50 50 50 70 70 70
37576-106 106 106 190 142 34 226 170 11 242 186 14
37577-246 190 14 246 190 14 246 190 14 154 114 10
37578- 6 6 6 74 74 74 226 226 226 253 253 253
37579-253 253 253 253 253 253 253 253 253 253 253 253
37580-253 253 253 253 253 253 231 231 231 250 250 250
37581-253 253 253 253 253 253 253 253 253 253 253 253
37582-253 253 253 253 253 253 253 253 253 253 253 253
37583-253 253 253 253 253 253 253 253 253 253 253 253
37584-253 253 253 253 253 253 253 253 253 228 184 62
37585-241 196 14 241 208 19 232 195 16 38 30 10
37586- 2 2 6 2 2 6 2 2 6 2 2 6
37587- 2 2 6 6 6 6 30 30 30 26 26 26
37588-203 166 17 154 142 90 66 66 66 26 26 26
37589- 6 6 6 0 0 0 0 0 0 0 0 0
37590- 0 0 0 0 0 0 0 0 0 0 0 0
37591- 0 0 0 0 0 0 0 0 0 0 0 0
37592- 0 0 0 0 0 0 0 0 0 0 0 0
37593- 0 0 0 0 0 0 0 0 0 0 0 0
37594- 6 6 6 18 18 18 38 38 38 58 58 58
37595- 78 78 78 86 86 86 101 101 101 123 123 123
37596-175 146 61 210 150 10 234 174 13 246 186 14
37597-246 190 14 246 190 14 246 190 14 238 190 10
37598-102 78 10 2 2 6 46 46 46 198 198 198
37599-253 253 253 253 253 253 253 253 253 253 253 253
37600-253 253 253 253 253 253 234 234 234 242 242 242
37601-253 253 253 253 253 253 253 253 253 253 253 253
37602-253 253 253 253 253 253 253 253 253 253 253 253
37603-253 253 253 253 253 253 253 253 253 253 253 253
37604-253 253 253 253 253 253 253 253 253 224 178 62
37605-242 186 14 241 196 14 210 166 10 22 18 6
37606- 2 2 6 2 2 6 2 2 6 2 2 6
37607- 2 2 6 2 2 6 6 6 6 121 92 8
37608-238 202 15 232 195 16 82 82 82 34 34 34
37609- 10 10 10 0 0 0 0 0 0 0 0 0
37610- 0 0 0 0 0 0 0 0 0 0 0 0
37611- 0 0 0 0 0 0 0 0 0 0 0 0
37612- 0 0 0 0 0 0 0 0 0 0 0 0
37613- 0 0 0 0 0 0 0 0 0 0 0 0
37614- 14 14 14 38 38 38 70 70 70 154 122 46
37615-190 142 34 200 144 11 197 138 11 197 138 11
37616-213 154 11 226 170 11 242 186 14 246 190 14
37617-246 190 14 246 190 14 246 190 14 246 190 14
37618-225 175 15 46 32 6 2 2 6 22 22 22
37619-158 158 158 250 250 250 253 253 253 253 253 253
37620-253 253 253 253 253 253 253 253 253 253 253 253
37621-253 253 253 253 253 253 253 253 253 253 253 253
37622-253 253 253 253 253 253 253 253 253 253 253 253
37623-253 253 253 253 253 253 253 253 253 253 253 253
37624-253 253 253 250 250 250 242 242 242 224 178 62
37625-239 182 13 236 186 11 213 154 11 46 32 6
37626- 2 2 6 2 2 6 2 2 6 2 2 6
37627- 2 2 6 2 2 6 61 42 6 225 175 15
37628-238 190 10 236 186 11 112 100 78 42 42 42
37629- 14 14 14 0 0 0 0 0 0 0 0 0
37630- 0 0 0 0 0 0 0 0 0 0 0 0
37631- 0 0 0 0 0 0 0 0 0 0 0 0
37632- 0 0 0 0 0 0 0 0 0 0 0 0
37633- 0 0 0 0 0 0 0 0 0 6 6 6
37634- 22 22 22 54 54 54 154 122 46 213 154 11
37635-226 170 11 230 174 11 226 170 11 226 170 11
37636-236 178 12 242 186 14 246 190 14 246 190 14
37637-246 190 14 246 190 14 246 190 14 246 190 14
37638-241 196 14 184 144 12 10 10 10 2 2 6
37639- 6 6 6 116 116 116 242 242 242 253 253 253
37640-253 253 253 253 253 253 253 253 253 253 253 253
37641-253 253 253 253 253 253 253 253 253 253 253 253
37642-253 253 253 253 253 253 253 253 253 253 253 253
37643-253 253 253 253 253 253 253 253 253 253 253 253
37644-253 253 253 231 231 231 198 198 198 214 170 54
37645-236 178 12 236 178 12 210 150 10 137 92 6
37646- 18 14 6 2 2 6 2 2 6 2 2 6
37647- 6 6 6 70 47 6 200 144 11 236 178 12
37648-239 182 13 239 182 13 124 112 88 58 58 58
37649- 22 22 22 6 6 6 0 0 0 0 0 0
37650- 0 0 0 0 0 0 0 0 0 0 0 0
37651- 0 0 0 0 0 0 0 0 0 0 0 0
37652- 0 0 0 0 0 0 0 0 0 0 0 0
37653- 0 0 0 0 0 0 0 0 0 10 10 10
37654- 30 30 30 70 70 70 180 133 36 226 170 11
37655-239 182 13 242 186 14 242 186 14 246 186 14
37656-246 190 14 246 190 14 246 190 14 246 190 14
37657-246 190 14 246 190 14 246 190 14 246 190 14
37658-246 190 14 232 195 16 98 70 6 2 2 6
37659- 2 2 6 2 2 6 66 66 66 221 221 221
37660-253 253 253 253 253 253 253 253 253 253 253 253
37661-253 253 253 253 253 253 253 253 253 253 253 253
37662-253 253 253 253 253 253 253 253 253 253 253 253
37663-253 253 253 253 253 253 253 253 253 253 253 253
37664-253 253 253 206 206 206 198 198 198 214 166 58
37665-230 174 11 230 174 11 216 158 10 192 133 9
37666-163 110 8 116 81 8 102 78 10 116 81 8
37667-167 114 7 197 138 11 226 170 11 239 182 13
37668-242 186 14 242 186 14 162 146 94 78 78 78
37669- 34 34 34 14 14 14 6 6 6 0 0 0
37670- 0 0 0 0 0 0 0 0 0 0 0 0
37671- 0 0 0 0 0 0 0 0 0 0 0 0
37672- 0 0 0 0 0 0 0 0 0 0 0 0
37673- 0 0 0 0 0 0 0 0 0 6 6 6
37674- 30 30 30 78 78 78 190 142 34 226 170 11
37675-239 182 13 246 190 14 246 190 14 246 190 14
37676-246 190 14 246 190 14 246 190 14 246 190 14
37677-246 190 14 246 190 14 246 190 14 246 190 14
37678-246 190 14 241 196 14 203 166 17 22 18 6
37679- 2 2 6 2 2 6 2 2 6 38 38 38
37680-218 218 218 253 253 253 253 253 253 253 253 253
37681-253 253 253 253 253 253 253 253 253 253 253 253
37682-253 253 253 253 253 253 253 253 253 253 253 253
37683-253 253 253 253 253 253 253 253 253 253 253 253
37684-250 250 250 206 206 206 198 198 198 202 162 69
37685-226 170 11 236 178 12 224 166 10 210 150 10
37686-200 144 11 197 138 11 192 133 9 197 138 11
37687-210 150 10 226 170 11 242 186 14 246 190 14
37688-246 190 14 246 186 14 225 175 15 124 112 88
37689- 62 62 62 30 30 30 14 14 14 6 6 6
37690- 0 0 0 0 0 0 0 0 0 0 0 0
37691- 0 0 0 0 0 0 0 0 0 0 0 0
37692- 0 0 0 0 0 0 0 0 0 0 0 0
37693- 0 0 0 0 0 0 0 0 0 10 10 10
37694- 30 30 30 78 78 78 174 135 50 224 166 10
37695-239 182 13 246 190 14 246 190 14 246 190 14
37696-246 190 14 246 190 14 246 190 14 246 190 14
37697-246 190 14 246 190 14 246 190 14 246 190 14
37698-246 190 14 246 190 14 241 196 14 139 102 15
37699- 2 2 6 2 2 6 2 2 6 2 2 6
37700- 78 78 78 250 250 250 253 253 253 253 253 253
37701-253 253 253 253 253 253 253 253 253 253 253 253
37702-253 253 253 253 253 253 253 253 253 253 253 253
37703-253 253 253 253 253 253 253 253 253 253 253 253
37704-250 250 250 214 214 214 198 198 198 190 150 46
37705-219 162 10 236 178 12 234 174 13 224 166 10
37706-216 158 10 213 154 11 213 154 11 216 158 10
37707-226 170 11 239 182 13 246 190 14 246 190 14
37708-246 190 14 246 190 14 242 186 14 206 162 42
37709-101 101 101 58 58 58 30 30 30 14 14 14
37710- 6 6 6 0 0 0 0 0 0 0 0 0
37711- 0 0 0 0 0 0 0 0 0 0 0 0
37712- 0 0 0 0 0 0 0 0 0 0 0 0
37713- 0 0 0 0 0 0 0 0 0 10 10 10
37714- 30 30 30 74 74 74 174 135 50 216 158 10
37715-236 178 12 246 190 14 246 190 14 246 190 14
37716-246 190 14 246 190 14 246 190 14 246 190 14
37717-246 190 14 246 190 14 246 190 14 246 190 14
37718-246 190 14 246 190 14 241 196 14 226 184 13
37719- 61 42 6 2 2 6 2 2 6 2 2 6
37720- 22 22 22 238 238 238 253 253 253 253 253 253
37721-253 253 253 253 253 253 253 253 253 253 253 253
37722-253 253 253 253 253 253 253 253 253 253 253 253
37723-253 253 253 253 253 253 253 253 253 253 253 253
37724-253 253 253 226 226 226 187 187 187 180 133 36
37725-216 158 10 236 178 12 239 182 13 236 178 12
37726-230 174 11 226 170 11 226 170 11 230 174 11
37727-236 178 12 242 186 14 246 190 14 246 190 14
37728-246 190 14 246 190 14 246 186 14 239 182 13
37729-206 162 42 106 106 106 66 66 66 34 34 34
37730- 14 14 14 6 6 6 0 0 0 0 0 0
37731- 0 0 0 0 0 0 0 0 0 0 0 0
37732- 0 0 0 0 0 0 0 0 0 0 0 0
37733- 0 0 0 0 0 0 0 0 0 6 6 6
37734- 26 26 26 70 70 70 163 133 67 213 154 11
37735-236 178 12 246 190 14 246 190 14 246 190 14
37736-246 190 14 246 190 14 246 190 14 246 190 14
37737-246 190 14 246 190 14 246 190 14 246 190 14
37738-246 190 14 246 190 14 246 190 14 241 196 14
37739-190 146 13 18 14 6 2 2 6 2 2 6
37740- 46 46 46 246 246 246 253 253 253 253 253 253
37741-253 253 253 253 253 253 253 253 253 253 253 253
37742-253 253 253 253 253 253 253 253 253 253 253 253
37743-253 253 253 253 253 253 253 253 253 253 253 253
37744-253 253 253 221 221 221 86 86 86 156 107 11
37745-216 158 10 236 178 12 242 186 14 246 186 14
37746-242 186 14 239 182 13 239 182 13 242 186 14
37747-242 186 14 246 186 14 246 190 14 246 190 14
37748-246 190 14 246 190 14 246 190 14 246 190 14
37749-242 186 14 225 175 15 142 122 72 66 66 66
37750- 30 30 30 10 10 10 0 0 0 0 0 0
37751- 0 0 0 0 0 0 0 0 0 0 0 0
37752- 0 0 0 0 0 0 0 0 0 0 0 0
37753- 0 0 0 0 0 0 0 0 0 6 6 6
37754- 26 26 26 70 70 70 163 133 67 210 150 10
37755-236 178 12 246 190 14 246 190 14 246 190 14
37756-246 190 14 246 190 14 246 190 14 246 190 14
37757-246 190 14 246 190 14 246 190 14 246 190 14
37758-246 190 14 246 190 14 246 190 14 246 190 14
37759-232 195 16 121 92 8 34 34 34 106 106 106
37760-221 221 221 253 253 253 253 253 253 253 253 253
37761-253 253 253 253 253 253 253 253 253 253 253 253
37762-253 253 253 253 253 253 253 253 253 253 253 253
37763-253 253 253 253 253 253 253 253 253 253 253 253
37764-242 242 242 82 82 82 18 14 6 163 110 8
37765-216 158 10 236 178 12 242 186 14 246 190 14
37766-246 190 14 246 190 14 246 190 14 246 190 14
37767-246 190 14 246 190 14 246 190 14 246 190 14
37768-246 190 14 246 190 14 246 190 14 246 190 14
37769-246 190 14 246 190 14 242 186 14 163 133 67
37770- 46 46 46 18 18 18 6 6 6 0 0 0
37771- 0 0 0 0 0 0 0 0 0 0 0 0
37772- 0 0 0 0 0 0 0 0 0 0 0 0
37773- 0 0 0 0 0 0 0 0 0 10 10 10
37774- 30 30 30 78 78 78 163 133 67 210 150 10
37775-236 178 12 246 186 14 246 190 14 246 190 14
37776-246 190 14 246 190 14 246 190 14 246 190 14
37777-246 190 14 246 190 14 246 190 14 246 190 14
37778-246 190 14 246 190 14 246 190 14 246 190 14
37779-241 196 14 215 174 15 190 178 144 253 253 253
37780-253 253 253 253 253 253 253 253 253 253 253 253
37781-253 253 253 253 253 253 253 253 253 253 253 253
37782-253 253 253 253 253 253 253 253 253 253 253 253
37783-253 253 253 253 253 253 253 253 253 218 218 218
37784- 58 58 58 2 2 6 22 18 6 167 114 7
37785-216 158 10 236 178 12 246 186 14 246 190 14
37786-246 190 14 246 190 14 246 190 14 246 190 14
37787-246 190 14 246 190 14 246 190 14 246 190 14
37788-246 190 14 246 190 14 246 190 14 246 190 14
37789-246 190 14 246 186 14 242 186 14 190 150 46
37790- 54 54 54 22 22 22 6 6 6 0 0 0
37791- 0 0 0 0 0 0 0 0 0 0 0 0
37792- 0 0 0 0 0 0 0 0 0 0 0 0
37793- 0 0 0 0 0 0 0 0 0 14 14 14
37794- 38 38 38 86 86 86 180 133 36 213 154 11
37795-236 178 12 246 186 14 246 190 14 246 190 14
37796-246 190 14 246 190 14 246 190 14 246 190 14
37797-246 190 14 246 190 14 246 190 14 246 190 14
37798-246 190 14 246 190 14 246 190 14 246 190 14
37799-246 190 14 232 195 16 190 146 13 214 214 214
37800-253 253 253 253 253 253 253 253 253 253 253 253
37801-253 253 253 253 253 253 253 253 253 253 253 253
37802-253 253 253 253 253 253 253 253 253 253 253 253
37803-253 253 253 250 250 250 170 170 170 26 26 26
37804- 2 2 6 2 2 6 37 26 9 163 110 8
37805-219 162 10 239 182 13 246 186 14 246 190 14
37806-246 190 14 246 190 14 246 190 14 246 190 14
37807-246 190 14 246 190 14 246 190 14 246 190 14
37808-246 190 14 246 190 14 246 190 14 246 190 14
37809-246 186 14 236 178 12 224 166 10 142 122 72
37810- 46 46 46 18 18 18 6 6 6 0 0 0
37811- 0 0 0 0 0 0 0 0 0 0 0 0
37812- 0 0 0 0 0 0 0 0 0 0 0 0
37813- 0 0 0 0 0 0 6 6 6 18 18 18
37814- 50 50 50 109 106 95 192 133 9 224 166 10
37815-242 186 14 246 190 14 246 190 14 246 190 14
37816-246 190 14 246 190 14 246 190 14 246 190 14
37817-246 190 14 246 190 14 246 190 14 246 190 14
37818-246 190 14 246 190 14 246 190 14 246 190 14
37819-242 186 14 226 184 13 210 162 10 142 110 46
37820-226 226 226 253 253 253 253 253 253 253 253 253
37821-253 253 253 253 253 253 253 253 253 253 253 253
37822-253 253 253 253 253 253 253 253 253 253 253 253
37823-198 198 198 66 66 66 2 2 6 2 2 6
37824- 2 2 6 2 2 6 50 34 6 156 107 11
37825-219 162 10 239 182 13 246 186 14 246 190 14
37826-246 190 14 246 190 14 246 190 14 246 190 14
37827-246 190 14 246 190 14 246 190 14 246 190 14
37828-246 190 14 246 190 14 246 190 14 242 186 14
37829-234 174 13 213 154 11 154 122 46 66 66 66
37830- 30 30 30 10 10 10 0 0 0 0 0 0
37831- 0 0 0 0 0 0 0 0 0 0 0 0
37832- 0 0 0 0 0 0 0 0 0 0 0 0
37833- 0 0 0 0 0 0 6 6 6 22 22 22
37834- 58 58 58 154 121 60 206 145 10 234 174 13
37835-242 186 14 246 186 14 246 190 14 246 190 14
37836-246 190 14 246 190 14 246 190 14 246 190 14
37837-246 190 14 246 190 14 246 190 14 246 190 14
37838-246 190 14 246 190 14 246 190 14 246 190 14
37839-246 186 14 236 178 12 210 162 10 163 110 8
37840- 61 42 6 138 138 138 218 218 218 250 250 250
37841-253 253 253 253 253 253 253 253 253 250 250 250
37842-242 242 242 210 210 210 144 144 144 66 66 66
37843- 6 6 6 2 2 6 2 2 6 2 2 6
37844- 2 2 6 2 2 6 61 42 6 163 110 8
37845-216 158 10 236 178 12 246 190 14 246 190 14
37846-246 190 14 246 190 14 246 190 14 246 190 14
37847-246 190 14 246 190 14 246 190 14 246 190 14
37848-246 190 14 239 182 13 230 174 11 216 158 10
37849-190 142 34 124 112 88 70 70 70 38 38 38
37850- 18 18 18 6 6 6 0 0 0 0 0 0
37851- 0 0 0 0 0 0 0 0 0 0 0 0
37852- 0 0 0 0 0 0 0 0 0 0 0 0
37853- 0 0 0 0 0 0 6 6 6 22 22 22
37854- 62 62 62 168 124 44 206 145 10 224 166 10
37855-236 178 12 239 182 13 242 186 14 242 186 14
37856-246 186 14 246 190 14 246 190 14 246 190 14
37857-246 190 14 246 190 14 246 190 14 246 190 14
37858-246 190 14 246 190 14 246 190 14 246 190 14
37859-246 190 14 236 178 12 216 158 10 175 118 6
37860- 80 54 7 2 2 6 6 6 6 30 30 30
37861- 54 54 54 62 62 62 50 50 50 38 38 38
37862- 14 14 14 2 2 6 2 2 6 2 2 6
37863- 2 2 6 2 2 6 2 2 6 2 2 6
37864- 2 2 6 6 6 6 80 54 7 167 114 7
37865-213 154 11 236 178 12 246 190 14 246 190 14
37866-246 190 14 246 190 14 246 190 14 246 190 14
37867-246 190 14 242 186 14 239 182 13 239 182 13
37868-230 174 11 210 150 10 174 135 50 124 112 88
37869- 82 82 82 54 54 54 34 34 34 18 18 18
37870- 6 6 6 0 0 0 0 0 0 0 0 0
37871- 0 0 0 0 0 0 0 0 0 0 0 0
37872- 0 0 0 0 0 0 0 0 0 0 0 0
37873- 0 0 0 0 0 0 6 6 6 18 18 18
37874- 50 50 50 158 118 36 192 133 9 200 144 11
37875-216 158 10 219 162 10 224 166 10 226 170 11
37876-230 174 11 236 178 12 239 182 13 239 182 13
37877-242 186 14 246 186 14 246 190 14 246 190 14
37878-246 190 14 246 190 14 246 190 14 246 190 14
37879-246 186 14 230 174 11 210 150 10 163 110 8
37880-104 69 6 10 10 10 2 2 6 2 2 6
37881- 2 2 6 2 2 6 2 2 6 2 2 6
37882- 2 2 6 2 2 6 2 2 6 2 2 6
37883- 2 2 6 2 2 6 2 2 6 2 2 6
37884- 2 2 6 6 6 6 91 60 6 167 114 7
37885-206 145 10 230 174 11 242 186 14 246 190 14
37886-246 190 14 246 190 14 246 186 14 242 186 14
37887-239 182 13 230 174 11 224 166 10 213 154 11
37888-180 133 36 124 112 88 86 86 86 58 58 58
37889- 38 38 38 22 22 22 10 10 10 6 6 6
37890- 0 0 0 0 0 0 0 0 0 0 0 0
37891- 0 0 0 0 0 0 0 0 0 0 0 0
37892- 0 0 0 0 0 0 0 0 0 0 0 0
37893- 0 0 0 0 0 0 0 0 0 14 14 14
37894- 34 34 34 70 70 70 138 110 50 158 118 36
37895-167 114 7 180 123 7 192 133 9 197 138 11
37896-200 144 11 206 145 10 213 154 11 219 162 10
37897-224 166 10 230 174 11 239 182 13 242 186 14
37898-246 186 14 246 186 14 246 186 14 246 186 14
37899-239 182 13 216 158 10 185 133 11 152 99 6
37900-104 69 6 18 14 6 2 2 6 2 2 6
37901- 2 2 6 2 2 6 2 2 6 2 2 6
37902- 2 2 6 2 2 6 2 2 6 2 2 6
37903- 2 2 6 2 2 6 2 2 6 2 2 6
37904- 2 2 6 6 6 6 80 54 7 152 99 6
37905-192 133 9 219 162 10 236 178 12 239 182 13
37906-246 186 14 242 186 14 239 182 13 236 178 12
37907-224 166 10 206 145 10 192 133 9 154 121 60
37908- 94 94 94 62 62 62 42 42 42 22 22 22
37909- 14 14 14 6 6 6 0 0 0 0 0 0
37910- 0 0 0 0 0 0 0 0 0 0 0 0
37911- 0 0 0 0 0 0 0 0 0 0 0 0
37912- 0 0 0 0 0 0 0 0 0 0 0 0
37913- 0 0 0 0 0 0 0 0 0 6 6 6
37914- 18 18 18 34 34 34 58 58 58 78 78 78
37915-101 98 89 124 112 88 142 110 46 156 107 11
37916-163 110 8 167 114 7 175 118 6 180 123 7
37917-185 133 11 197 138 11 210 150 10 219 162 10
37918-226 170 11 236 178 12 236 178 12 234 174 13
37919-219 162 10 197 138 11 163 110 8 130 83 6
37920- 91 60 6 10 10 10 2 2 6 2 2 6
37921- 18 18 18 38 38 38 38 38 38 38 38 38
37922- 38 38 38 38 38 38 38 38 38 38 38 38
37923- 38 38 38 38 38 38 26 26 26 2 2 6
37924- 2 2 6 6 6 6 70 47 6 137 92 6
37925-175 118 6 200 144 11 219 162 10 230 174 11
37926-234 174 13 230 174 11 219 162 10 210 150 10
37927-192 133 9 163 110 8 124 112 88 82 82 82
37928- 50 50 50 30 30 30 14 14 14 6 6 6
37929- 0 0 0 0 0 0 0 0 0 0 0 0
37930- 0 0 0 0 0 0 0 0 0 0 0 0
37931- 0 0 0 0 0 0 0 0 0 0 0 0
37932- 0 0 0 0 0 0 0 0 0 0 0 0
37933- 0 0 0 0 0 0 0 0 0 0 0 0
37934- 6 6 6 14 14 14 22 22 22 34 34 34
37935- 42 42 42 58 58 58 74 74 74 86 86 86
37936-101 98 89 122 102 70 130 98 46 121 87 25
37937-137 92 6 152 99 6 163 110 8 180 123 7
37938-185 133 11 197 138 11 206 145 10 200 144 11
37939-180 123 7 156 107 11 130 83 6 104 69 6
37940- 50 34 6 54 54 54 110 110 110 101 98 89
37941- 86 86 86 82 82 82 78 78 78 78 78 78
37942- 78 78 78 78 78 78 78 78 78 78 78 78
37943- 78 78 78 82 82 82 86 86 86 94 94 94
37944-106 106 106 101 101 101 86 66 34 124 80 6
37945-156 107 11 180 123 7 192 133 9 200 144 11
37946-206 145 10 200 144 11 192 133 9 175 118 6
37947-139 102 15 109 106 95 70 70 70 42 42 42
37948- 22 22 22 10 10 10 0 0 0 0 0 0
37949- 0 0 0 0 0 0 0 0 0 0 0 0
37950- 0 0 0 0 0 0 0 0 0 0 0 0
37951- 0 0 0 0 0 0 0 0 0 0 0 0
37952- 0 0 0 0 0 0 0 0 0 0 0 0
37953- 0 0 0 0 0 0 0 0 0 0 0 0
37954- 0 0 0 0 0 0 6 6 6 10 10 10
37955- 14 14 14 22 22 22 30 30 30 38 38 38
37956- 50 50 50 62 62 62 74 74 74 90 90 90
37957-101 98 89 112 100 78 121 87 25 124 80 6
37958-137 92 6 152 99 6 152 99 6 152 99 6
37959-138 86 6 124 80 6 98 70 6 86 66 30
37960-101 98 89 82 82 82 58 58 58 46 46 46
37961- 38 38 38 34 34 34 34 34 34 34 34 34
37962- 34 34 34 34 34 34 34 34 34 34 34 34
37963- 34 34 34 34 34 34 38 38 38 42 42 42
37964- 54 54 54 82 82 82 94 86 76 91 60 6
37965-134 86 6 156 107 11 167 114 7 175 118 6
37966-175 118 6 167 114 7 152 99 6 121 87 25
37967-101 98 89 62 62 62 34 34 34 18 18 18
37968- 6 6 6 0 0 0 0 0 0 0 0 0
37969- 0 0 0 0 0 0 0 0 0 0 0 0
37970- 0 0 0 0 0 0 0 0 0 0 0 0
37971- 0 0 0 0 0 0 0 0 0 0 0 0
37972- 0 0 0 0 0 0 0 0 0 0 0 0
37973- 0 0 0 0 0 0 0 0 0 0 0 0
37974- 0 0 0 0 0 0 0 0 0 0 0 0
37975- 0 0 0 6 6 6 6 6 6 10 10 10
37976- 18 18 18 22 22 22 30 30 30 42 42 42
37977- 50 50 50 66 66 66 86 86 86 101 98 89
37978-106 86 58 98 70 6 104 69 6 104 69 6
37979-104 69 6 91 60 6 82 62 34 90 90 90
37980- 62 62 62 38 38 38 22 22 22 14 14 14
37981- 10 10 10 10 10 10 10 10 10 10 10 10
37982- 10 10 10 10 10 10 6 6 6 10 10 10
37983- 10 10 10 10 10 10 10 10 10 14 14 14
37984- 22 22 22 42 42 42 70 70 70 89 81 66
37985- 80 54 7 104 69 6 124 80 6 137 92 6
37986-134 86 6 116 81 8 100 82 52 86 86 86
37987- 58 58 58 30 30 30 14 14 14 6 6 6
37988- 0 0 0 0 0 0 0 0 0 0 0 0
37989- 0 0 0 0 0 0 0 0 0 0 0 0
37990- 0 0 0 0 0 0 0 0 0 0 0 0
37991- 0 0 0 0 0 0 0 0 0 0 0 0
37992- 0 0 0 0 0 0 0 0 0 0 0 0
37993- 0 0 0 0 0 0 0 0 0 0 0 0
37994- 0 0 0 0 0 0 0 0 0 0 0 0
37995- 0 0 0 0 0 0 0 0 0 0 0 0
37996- 0 0 0 6 6 6 10 10 10 14 14 14
37997- 18 18 18 26 26 26 38 38 38 54 54 54
37998- 70 70 70 86 86 86 94 86 76 89 81 66
37999- 89 81 66 86 86 86 74 74 74 50 50 50
38000- 30 30 30 14 14 14 6 6 6 0 0 0
38001- 0 0 0 0 0 0 0 0 0 0 0 0
38002- 0 0 0 0 0 0 0 0 0 0 0 0
38003- 0 0 0 0 0 0 0 0 0 0 0 0
38004- 6 6 6 18 18 18 34 34 34 58 58 58
38005- 82 82 82 89 81 66 89 81 66 89 81 66
38006- 94 86 66 94 86 76 74 74 74 50 50 50
38007- 26 26 26 14 14 14 6 6 6 0 0 0
38008- 0 0 0 0 0 0 0 0 0 0 0 0
38009- 0 0 0 0 0 0 0 0 0 0 0 0
38010- 0 0 0 0 0 0 0 0 0 0 0 0
38011- 0 0 0 0 0 0 0 0 0 0 0 0
38012- 0 0 0 0 0 0 0 0 0 0 0 0
38013- 0 0 0 0 0 0 0 0 0 0 0 0
38014- 0 0 0 0 0 0 0 0 0 0 0 0
38015- 0 0 0 0 0 0 0 0 0 0 0 0
38016- 0 0 0 0 0 0 0 0 0 0 0 0
38017- 6 6 6 6 6 6 14 14 14 18 18 18
38018- 30 30 30 38 38 38 46 46 46 54 54 54
38019- 50 50 50 42 42 42 30 30 30 18 18 18
38020- 10 10 10 0 0 0 0 0 0 0 0 0
38021- 0 0 0 0 0 0 0 0 0 0 0 0
38022- 0 0 0 0 0 0 0 0 0 0 0 0
38023- 0 0 0 0 0 0 0 0 0 0 0 0
38024- 0 0 0 6 6 6 14 14 14 26 26 26
38025- 38 38 38 50 50 50 58 58 58 58 58 58
38026- 54 54 54 42 42 42 30 30 30 18 18 18
38027- 10 10 10 0 0 0 0 0 0 0 0 0
38028- 0 0 0 0 0 0 0 0 0 0 0 0
38029- 0 0 0 0 0 0 0 0 0 0 0 0
38030- 0 0 0 0 0 0 0 0 0 0 0 0
38031- 0 0 0 0 0 0 0 0 0 0 0 0
38032- 0 0 0 0 0 0 0 0 0 0 0 0
38033- 0 0 0 0 0 0 0 0 0 0 0 0
38034- 0 0 0 0 0 0 0 0 0 0 0 0
38035- 0 0 0 0 0 0 0 0 0 0 0 0
38036- 0 0 0 0 0 0 0 0 0 0 0 0
38037- 0 0 0 0 0 0 0 0 0 6 6 6
38038- 6 6 6 10 10 10 14 14 14 18 18 18
38039- 18 18 18 14 14 14 10 10 10 6 6 6
38040- 0 0 0 0 0 0 0 0 0 0 0 0
38041- 0 0 0 0 0 0 0 0 0 0 0 0
38042- 0 0 0 0 0 0 0 0 0 0 0 0
38043- 0 0 0 0 0 0 0 0 0 0 0 0
38044- 0 0 0 0 0 0 0 0 0 6 6 6
38045- 14 14 14 18 18 18 22 22 22 22 22 22
38046- 18 18 18 14 14 14 10 10 10 6 6 6
38047- 0 0 0 0 0 0 0 0 0 0 0 0
38048- 0 0 0 0 0 0 0 0 0 0 0 0
38049- 0 0 0 0 0 0 0 0 0 0 0 0
38050- 0 0 0 0 0 0 0 0 0 0 0 0
38051- 0 0 0 0 0 0 0 0 0 0 0 0
38052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38065+4 4 4 4 4 4
38066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38079+4 4 4 4 4 4
38080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38093+4 4 4 4 4 4
38094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38107+4 4 4 4 4 4
38108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38121+4 4 4 4 4 4
38122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38135+4 4 4 4 4 4
38136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38140+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
38141+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
38142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38145+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
38146+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38147+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
38148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38149+4 4 4 4 4 4
38150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38154+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
38155+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
38156+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38159+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
38160+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
38161+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
38162+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38163+4 4 4 4 4 4
38164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38168+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
38169+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
38170+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38173+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
38174+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
38175+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
38176+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
38177+4 4 4 4 4 4
38178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38181+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
38182+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
38183+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
38184+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
38185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38186+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38187+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
38188+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
38189+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
38190+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
38191+4 4 4 4 4 4
38192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38195+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
38196+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
38197+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
38198+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
38199+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38200+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
38201+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
38202+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
38203+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
38204+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
38205+4 4 4 4 4 4
38206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38209+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
38210+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
38211+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
38212+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
38213+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
38214+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
38215+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
38216+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
38217+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
38218+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
38219+4 4 4 4 4 4
38220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38222+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
38223+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
38224+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
38225+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
38226+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
38227+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
38228+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
38229+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
38230+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
38231+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
38232+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
38233+4 4 4 4 4 4
38234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38236+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
38237+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
38238+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
38239+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
38240+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
38241+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
38242+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
38243+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
38244+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
38245+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
38246+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
38247+4 4 4 4 4 4
38248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38250+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
38251+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
38252+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
38253+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
38254+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
38255+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
38256+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
38257+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
38258+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
38259+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
38260+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
38261+4 4 4 4 4 4
38262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38264+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
38265+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
38266+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
38267+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
38268+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
38269+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
38270+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
38271+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
38272+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
38273+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
38274+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
38275+4 4 4 4 4 4
38276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38277+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
38278+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
38279+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
38280+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
38281+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
38282+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
38283+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
38284+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
38285+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
38286+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
38287+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
38288+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
38289+4 4 4 4 4 4
38290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38291+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
38292+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
38293+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
38294+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38295+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
38296+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
38297+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
38298+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
38299+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
38300+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
38301+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
38302+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
38303+0 0 0 4 4 4
38304+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
38305+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
38306+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
38307+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
38308+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
38309+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
38310+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
38311+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
38312+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
38313+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
38314+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
38315+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
38316+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
38317+2 0 0 0 0 0
38318+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
38319+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
38320+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
38321+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
38322+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
38323+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
38324+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
38325+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
38326+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
38327+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
38328+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
38329+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
38330+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
38331+37 38 37 0 0 0
38332+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38333+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
38334+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
38335+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
38336+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
38337+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
38338+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
38339+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
38340+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
38341+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
38342+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
38343+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
38344+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
38345+85 115 134 4 0 0
38346+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
38347+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
38348+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
38349+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
38350+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
38351+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
38352+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
38353+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
38354+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
38355+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
38356+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
38357+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
38358+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
38359+60 73 81 4 0 0
38360+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
38361+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
38362+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
38363+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
38364+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
38365+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
38366+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
38367+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
38368+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
38369+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
38370+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
38371+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
38372+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
38373+16 19 21 4 0 0
38374+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
38375+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
38376+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
38377+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
38378+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
38379+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
38380+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
38381+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
38382+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
38383+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
38384+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
38385+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
38386+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
38387+4 0 0 4 3 3
38388+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
38389+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
38390+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
38391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
38392+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
38393+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
38394+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
38395+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
38396+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
38397+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
38398+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
38399+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
38400+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
38401+3 2 2 4 4 4
38402+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
38403+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
38404+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
38405+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
38406+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
38407+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
38408+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
38409+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
38410+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
38411+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
38412+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
38413+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
38414+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
38415+4 4 4 4 4 4
38416+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
38417+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
38418+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
38419+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
38420+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
38421+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
38422+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
38423+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
38424+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
38425+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
38426+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
38427+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
38428+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
38429+4 4 4 4 4 4
38430+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
38431+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
38432+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
38433+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
38434+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
38435+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38436+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
38437+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
38438+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
38439+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
38440+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
38441+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
38442+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
38443+5 5 5 5 5 5
38444+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
38445+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
38446+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
38447+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
38448+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
38449+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38450+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
38451+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
38452+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
38453+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
38454+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
38455+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
38456+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
38457+5 5 5 4 4 4
38458+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
38459+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
38460+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
38461+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
38462+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38463+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
38464+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
38465+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
38466+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
38467+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
38468+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
38469+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38471+4 4 4 4 4 4
38472+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
38473+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
38474+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
38475+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
38476+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
38477+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38478+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38479+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
38480+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
38481+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
38482+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
38483+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
38484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38485+4 4 4 4 4 4
38486+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
38487+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
38488+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
38489+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
38490+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38491+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
38492+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
38493+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
38494+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
38495+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
38496+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
38497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38499+4 4 4 4 4 4
38500+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
38501+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
38502+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
38503+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
38504+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38505+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38506+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
38507+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
38508+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
38509+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
38510+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
38511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38513+4 4 4 4 4 4
38514+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
38515+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
38516+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
38517+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
38518+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38519+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
38520+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
38521+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
38522+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
38523+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
38524+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38527+4 4 4 4 4 4
38528+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
38529+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
38530+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
38531+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
38532+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
38533+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
38534+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
38535+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
38536+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
38537+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
38538+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
38539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38541+4 4 4 4 4 4
38542+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
38543+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
38544+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
38545+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
38546+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
38547+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
38548+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
38549+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
38550+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
38551+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
38552+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
38553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38555+4 4 4 4 4 4
38556+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
38557+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
38558+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
38559+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38560+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
38561+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
38562+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
38563+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
38564+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
38565+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
38566+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38569+4 4 4 4 4 4
38570+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
38571+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
38572+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
38573+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38574+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38575+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
38576+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
38577+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
38578+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
38579+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
38580+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38583+4 4 4 4 4 4
38584+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
38585+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
38586+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38587+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
38588+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38589+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
38590+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
38591+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
38592+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
38593+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
38594+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38597+4 4 4 4 4 4
38598+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
38599+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
38600+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38601+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
38602+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38603+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
38604+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
38605+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
38606+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38607+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38608+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38611+4 4 4 4 4 4
38612+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38613+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
38614+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
38615+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
38616+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
38617+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
38618+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
38619+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
38620+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38621+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38622+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38625+4 4 4 4 4 4
38626+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
38627+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
38628+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
38629+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
38630+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38631+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
38632+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
38633+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
38634+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38635+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38636+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38639+4 4 4 4 4 4
38640+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
38641+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
38642+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38643+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
38644+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
38645+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
38646+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
38647+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
38648+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38649+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38650+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38653+4 4 4 4 4 4
38654+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
38655+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
38656+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38657+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
38658+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
38659+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
38660+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
38661+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
38662+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
38663+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38664+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38667+4 4 4 4 4 4
38668+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38669+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
38670+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
38671+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
38672+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
38673+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
38674+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
38675+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
38676+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38677+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38678+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38681+4 4 4 4 4 4
38682+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
38683+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
38684+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38685+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
38686+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
38687+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
38688+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
38689+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
38690+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
38691+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38692+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38695+4 4 4 4 4 4
38696+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
38697+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
38698+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
38699+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
38700+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
38701+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
38702+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
38703+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
38704+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38705+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38706+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38709+4 4 4 4 4 4
38710+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38711+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
38712+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
38713+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
38714+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
38715+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
38716+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
38717+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
38718+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38719+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38720+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38723+4 4 4 4 4 4
38724+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38725+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
38726+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
38727+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
38728+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
38729+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
38730+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38731+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
38732+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
38733+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38734+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38737+4 4 4 4 4 4
38738+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38739+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
38740+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
38741+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38742+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
38743+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
38744+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
38745+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
38746+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
38747+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38748+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38751+4 4 4 4 4 4
38752+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
38753+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
38754+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
38755+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
38756+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
38757+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
38758+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
38759+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
38760+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
38761+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38762+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38765+4 4 4 4 4 4
38766+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38767+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
38768+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
38769+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
38770+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
38771+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
38772+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
38773+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
38774+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
38775+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38776+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38779+4 4 4 4 4 4
38780+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
38781+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
38782+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
38783+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
38784+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
38785+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
38786+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
38787+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
38788+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
38789+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38790+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38793+4 4 4 4 4 4
38794+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
38795+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
38796+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
38797+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
38798+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
38799+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
38800+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
38801+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
38802+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
38803+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
38804+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38807+4 4 4 4 4 4
38808+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
38809+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
38810+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
38811+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
38812+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
38813+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
38814+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
38815+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
38816+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
38817+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
38818+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
38819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38821+4 4 4 4 4 4
38822+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
38823+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38824+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
38825+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
38826+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
38827+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
38828+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
38829+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
38830+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
38831+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
38832+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38835+4 4 4 4 4 4
38836+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
38837+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
38838+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
38839+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
38840+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
38841+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
38842+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38843+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
38844+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
38845+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
38846+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38849+4 4 4 4 4 4
38850+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
38851+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
38852+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
38853+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
38854+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
38855+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
38856+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
38857+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
38858+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
38859+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
38860+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38863+4 4 4 4 4 4
38864+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
38865+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
38866+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38867+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
38868+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
38869+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
38870+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
38871+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
38872+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
38873+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
38874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38877+4 4 4 4 4 4
38878+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38879+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
38880+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
38881+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
38882+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
38883+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
38884+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
38885+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
38886+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
38887+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38891+4 4 4 4 4 4
38892+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
38893+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
38894+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
38895+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
38896+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
38897+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
38898+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
38899+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
38900+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
38901+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
38902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38905+4 4 4 4 4 4
38906+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
38907+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
38908+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
38909+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
38910+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
38911+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
38912+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
38913+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
38914+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
38915+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
38916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38919+4 4 4 4 4 4
38920+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
38921+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
38922+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
38923+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
38924+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
38925+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
38926+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
38927+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
38928+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
38929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38933+4 4 4 4 4 4
38934+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
38935+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
38936+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
38937+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
38938+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
38939+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
38940+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
38941+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
38942+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
38943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38947+4 4 4 4 4 4
38948+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
38949+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
38950+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
38951+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
38952+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
38953+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
38954+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
38955+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
38956+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38961+4 4 4 4 4 4
38962+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
38963+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
38964+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
38965+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
38966+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
38967+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
38968+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
38969+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
38970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38975+4 4 4 4 4 4
38976+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
38977+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
38978+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
38979+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
38980+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
38981+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
38982+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
38983+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
38984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38989+4 4 4 4 4 4
38990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
38991+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
38992+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
38993+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
38994+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
38995+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
38996+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
38997+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
38998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
38999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39003+4 4 4 4 4 4
39004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39005+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
39006+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
39007+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
39008+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
39009+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
39010+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
39011+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
39012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39017+4 4 4 4 4 4
39018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39019+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
39020+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
39021+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39022+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
39023+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
39024+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
39025+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39031+4 4 4 4 4 4
39032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39034+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39035+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
39036+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
39037+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
39038+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
39039+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
39040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39045+4 4 4 4 4 4
39046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39049+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39050+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
39051+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
39052+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
39053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39059+4 4 4 4 4 4
39060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39063+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
39064+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
39065+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
39066+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
39067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39073+4 4 4 4 4 4
39074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39077+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
39078+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
39079+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
39080+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
39081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39087+4 4 4 4 4 4
39088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39091+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
39092+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
39093+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
39094+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
39095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39101+4 4 4 4 4 4
39102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
39106+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
39107+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
39108+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
39109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39115+4 4 4 4 4 4
39116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39120+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
39121+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
39122+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
39123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39129+4 4 4 4 4 4
39130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39134+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
39135+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
39136+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39143+4 4 4 4 4 4
39144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39148+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
39149+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
39150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39157+4 4 4 4 4 4
39158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39162+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
39163+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
39164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
39171+4 4 4 4 4 4
39172diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
39173index 3473e75..c930142 100644
39174--- a/drivers/video/udlfb.c
39175+++ b/drivers/video/udlfb.c
39176@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
39177 dlfb_urb_completion(urb);
39178
39179 error:
39180- atomic_add(bytes_sent, &dev->bytes_sent);
39181- atomic_add(bytes_identical, &dev->bytes_identical);
39182- atomic_add(width*height*2, &dev->bytes_rendered);
39183+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39184+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39185+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
39186 end_cycles = get_cycles();
39187- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39188+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39189 >> 10)), /* Kcycles */
39190 &dev->cpu_kcycles_used);
39191
39192@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
39193 dlfb_urb_completion(urb);
39194
39195 error:
39196- atomic_add(bytes_sent, &dev->bytes_sent);
39197- atomic_add(bytes_identical, &dev->bytes_identical);
39198- atomic_add(bytes_rendered, &dev->bytes_rendered);
39199+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
39200+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
39201+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
39202 end_cycles = get_cycles();
39203- atomic_add(((unsigned int) ((end_cycles - start_cycles)
39204+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
39205 >> 10)), /* Kcycles */
39206 &dev->cpu_kcycles_used);
39207 }
39208@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
39209 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39210 struct dlfb_data *dev = fb_info->par;
39211 return snprintf(buf, PAGE_SIZE, "%u\n",
39212- atomic_read(&dev->bytes_rendered));
39213+ atomic_read_unchecked(&dev->bytes_rendered));
39214 }
39215
39216 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39217@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
39218 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39219 struct dlfb_data *dev = fb_info->par;
39220 return snprintf(buf, PAGE_SIZE, "%u\n",
39221- atomic_read(&dev->bytes_identical));
39222+ atomic_read_unchecked(&dev->bytes_identical));
39223 }
39224
39225 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39226@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
39227 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39228 struct dlfb_data *dev = fb_info->par;
39229 return snprintf(buf, PAGE_SIZE, "%u\n",
39230- atomic_read(&dev->bytes_sent));
39231+ atomic_read_unchecked(&dev->bytes_sent));
39232 }
39233
39234 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39235@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
39236 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39237 struct dlfb_data *dev = fb_info->par;
39238 return snprintf(buf, PAGE_SIZE, "%u\n",
39239- atomic_read(&dev->cpu_kcycles_used));
39240+ atomic_read_unchecked(&dev->cpu_kcycles_used));
39241 }
39242
39243 static ssize_t edid_show(
39244@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
39245 struct fb_info *fb_info = dev_get_drvdata(fbdev);
39246 struct dlfb_data *dev = fb_info->par;
39247
39248- atomic_set(&dev->bytes_rendered, 0);
39249- atomic_set(&dev->bytes_identical, 0);
39250- atomic_set(&dev->bytes_sent, 0);
39251- atomic_set(&dev->cpu_kcycles_used, 0);
39252+ atomic_set_unchecked(&dev->bytes_rendered, 0);
39253+ atomic_set_unchecked(&dev->bytes_identical, 0);
39254+ atomic_set_unchecked(&dev->bytes_sent, 0);
39255+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
39256
39257 return count;
39258 }
39259diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
39260index 7f8472c..9842e87 100644
39261--- a/drivers/video/uvesafb.c
39262+++ b/drivers/video/uvesafb.c
39263@@ -19,6 +19,7 @@
39264 #include <linux/io.h>
39265 #include <linux/mutex.h>
39266 #include <linux/slab.h>
39267+#include <linux/moduleloader.h>
39268 #include <video/edid.h>
39269 #include <video/uvesafb.h>
39270 #ifdef CONFIG_X86
39271@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
39272 NULL,
39273 };
39274
39275- return call_usermodehelper(v86d_path, argv, envp, 1);
39276+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39277 }
39278
39279 /*
39280@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
39281 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39282 par->pmi_setpal = par->ypan = 0;
39283 } else {
39284+
39285+#ifdef CONFIG_PAX_KERNEXEC
39286+#ifdef CONFIG_MODULES
39287+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39288+#endif
39289+ if (!par->pmi_code) {
39290+ par->pmi_setpal = par->ypan = 0;
39291+ return 0;
39292+ }
39293+#endif
39294+
39295 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39296 + task->t.regs.edi);
39297+
39298+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39299+ pax_open_kernel();
39300+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39301+ pax_close_kernel();
39302+
39303+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39304+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39305+#else
39306 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39307 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39308+#endif
39309+
39310 printk(KERN_INFO "uvesafb: protected mode interface info at "
39311 "%04x:%04x\n",
39312 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39313@@ -1821,6 +1844,11 @@ out:
39314 if (par->vbe_modes)
39315 kfree(par->vbe_modes);
39316
39317+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39318+ if (par->pmi_code)
39319+ module_free_exec(NULL, par->pmi_code);
39320+#endif
39321+
39322 framebuffer_release(info);
39323 return err;
39324 }
39325@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
39326 kfree(par->vbe_state_orig);
39327 if (par->vbe_state_saved)
39328 kfree(par->vbe_state_saved);
39329+
39330+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39331+ if (par->pmi_code)
39332+ module_free_exec(NULL, par->pmi_code);
39333+#endif
39334+
39335 }
39336
39337 framebuffer_release(info);
39338diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
39339index 501b340..86bd4cf 100644
39340--- a/drivers/video/vesafb.c
39341+++ b/drivers/video/vesafb.c
39342@@ -9,6 +9,7 @@
39343 */
39344
39345 #include <linux/module.h>
39346+#include <linux/moduleloader.h>
39347 #include <linux/kernel.h>
39348 #include <linux/errno.h>
39349 #include <linux/string.h>
39350@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
39351 static int vram_total __initdata; /* Set total amount of memory */
39352 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39353 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39354-static void (*pmi_start)(void) __read_mostly;
39355-static void (*pmi_pal) (void) __read_mostly;
39356+static void (*pmi_start)(void) __read_only;
39357+static void (*pmi_pal) (void) __read_only;
39358 static int depth __read_mostly;
39359 static int vga_compat __read_mostly;
39360 /* --------------------------------------------------------------------- */
39361@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
39362 unsigned int size_vmode;
39363 unsigned int size_remap;
39364 unsigned int size_total;
39365+ void *pmi_code = NULL;
39366
39367 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39368 return -ENODEV;
39369@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
39370 size_remap = size_total;
39371 vesafb_fix.smem_len = size_remap;
39372
39373-#ifndef __i386__
39374- screen_info.vesapm_seg = 0;
39375-#endif
39376-
39377 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39378 printk(KERN_WARNING
39379 "vesafb: cannot reserve video memory at 0x%lx\n",
39380@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
39381 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39382 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39383
39384+#ifdef __i386__
39385+
39386+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39387+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39388+ if (!pmi_code)
39389+#elif !defined(CONFIG_PAX_KERNEXEC)
39390+ if (0)
39391+#endif
39392+
39393+#endif
39394+ screen_info.vesapm_seg = 0;
39395+
39396 if (screen_info.vesapm_seg) {
39397- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39398- screen_info.vesapm_seg,screen_info.vesapm_off);
39399+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39400+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39401 }
39402
39403 if (screen_info.vesapm_seg < 0xc000)
39404@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
39405
39406 if (ypan || pmi_setpal) {
39407 unsigned short *pmi_base;
39408+
39409 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39410- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39411- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39412+
39413+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39414+ pax_open_kernel();
39415+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39416+#else
39417+ pmi_code = pmi_base;
39418+#endif
39419+
39420+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39421+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39422+
39423+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39424+ pmi_start = ktva_ktla(pmi_start);
39425+ pmi_pal = ktva_ktla(pmi_pal);
39426+ pax_close_kernel();
39427+#endif
39428+
39429 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39430 if (pmi_base[3]) {
39431 printk(KERN_INFO "vesafb: pmi: ports = ");
39432@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
39433 info->node, info->fix.id);
39434 return 0;
39435 err:
39436+
39437+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39438+ module_free_exec(NULL, pmi_code);
39439+#endif
39440+
39441 if (info->screen_base)
39442 iounmap(info->screen_base);
39443 framebuffer_release(info);
39444diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
39445index 88714ae..16c2e11 100644
39446--- a/drivers/video/via/via_clock.h
39447+++ b/drivers/video/via/via_clock.h
39448@@ -56,7 +56,7 @@ struct via_clock {
39449
39450 void (*set_engine_pll_state)(u8 state);
39451 void (*set_engine_pll)(struct via_pll_config config);
39452-};
39453+} __no_const;
39454
39455
39456 static inline u32 get_pll_internal_frequency(u32 ref_freq,
39457diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
39458index e56c934..fc22f4b 100644
39459--- a/drivers/xen/xen-pciback/conf_space.h
39460+++ b/drivers/xen/xen-pciback/conf_space.h
39461@@ -44,15 +44,15 @@ struct config_field {
39462 struct {
39463 conf_dword_write write;
39464 conf_dword_read read;
39465- } dw;
39466+ } __no_const dw;
39467 struct {
39468 conf_word_write write;
39469 conf_word_read read;
39470- } w;
39471+ } __no_const w;
39472 struct {
39473 conf_byte_write write;
39474 conf_byte_read read;
39475- } b;
39476+ } __no_const b;
39477 } u;
39478 struct list_head list;
39479 };
39480diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
39481index 879ed88..bc03a01 100644
39482--- a/fs/9p/vfs_inode.c
39483+++ b/fs/9p/vfs_inode.c
39484@@ -1286,7 +1286,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
39485 void
39486 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39487 {
39488- char *s = nd_get_link(nd);
39489+ const char *s = nd_get_link(nd);
39490
39491 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39492 IS_ERR(s) ? "<error>" : s);
39493diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
39494index 79e2ca7..5828ad1 100644
39495--- a/fs/Kconfig.binfmt
39496+++ b/fs/Kconfig.binfmt
39497@@ -86,7 +86,7 @@ config HAVE_AOUT
39498
39499 config BINFMT_AOUT
39500 tristate "Kernel support for a.out and ECOFF binaries"
39501- depends on HAVE_AOUT
39502+ depends on HAVE_AOUT && BROKEN
39503 ---help---
39504 A.out (Assembler.OUTput) is a set of formats for libraries and
39505 executables used in the earliest versions of UNIX. Linux used
39506diff --git a/fs/aio.c b/fs/aio.c
39507index 969beb0..09fab51 100644
39508--- a/fs/aio.c
39509+++ b/fs/aio.c
39510@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
39511 size += sizeof(struct io_event) * nr_events;
39512 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39513
39514- if (nr_pages < 0)
39515+ if (nr_pages <= 0)
39516 return -EINVAL;
39517
39518 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39519@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
39520 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
39521 {
39522 ssize_t ret;
39523+ struct iovec iovstack;
39524
39525 #ifdef CONFIG_COMPAT
39526 if (compat)
39527 ret = compat_rw_copy_check_uvector(type,
39528 (struct compat_iovec __user *)kiocb->ki_buf,
39529- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39530+ kiocb->ki_nbytes, 1, &iovstack,
39531 &kiocb->ki_iovec, 1);
39532 else
39533 #endif
39534 ret = rw_copy_check_uvector(type,
39535 (struct iovec __user *)kiocb->ki_buf,
39536- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
39537+ kiocb->ki_nbytes, 1, &iovstack,
39538 &kiocb->ki_iovec, 1);
39539 if (ret < 0)
39540 goto out;
39541
39542+ if (kiocb->ki_iovec == &iovstack) {
39543+ kiocb->ki_inline_vec = iovstack;
39544+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39545+ }
39546 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39547 kiocb->ki_cur_seg = 0;
39548 /* ki_nbytes/left now reflect bytes instead of segs */
39549diff --git a/fs/attr.c b/fs/attr.c
39550index 7ee7ba4..0c61a60 100644
39551--- a/fs/attr.c
39552+++ b/fs/attr.c
39553@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
39554 unsigned long limit;
39555
39556 limit = rlimit(RLIMIT_FSIZE);
39557+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39558 if (limit != RLIM_INFINITY && offset > limit)
39559 goto out_sig;
39560 if (offset > inode->i_sb->s_maxbytes)
39561diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
39562index e1fbdee..cd5ea56 100644
39563--- a/fs/autofs4/waitq.c
39564+++ b/fs/autofs4/waitq.c
39565@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
39566 {
39567 unsigned long sigpipe, flags;
39568 mm_segment_t fs;
39569- const char *data = (const char *)addr;
39570+ const char __user *data = (const char __force_user *)addr;
39571 ssize_t wr = 0;
39572
39573 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
39574diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
39575index 8342ca6..82fd192 100644
39576--- a/fs/befs/linuxvfs.c
39577+++ b/fs/befs/linuxvfs.c
39578@@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39579 {
39580 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39581 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39582- char *link = nd_get_link(nd);
39583+ const char *link = nd_get_link(nd);
39584 if (!IS_ERR(link))
39585 kfree(link);
39586 }
39587diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
39588index a6395bd..a5b24c4 100644
39589--- a/fs/binfmt_aout.c
39590+++ b/fs/binfmt_aout.c
39591@@ -16,6 +16,7 @@
39592 #include <linux/string.h>
39593 #include <linux/fs.h>
39594 #include <linux/file.h>
39595+#include <linux/security.h>
39596 #include <linux/stat.h>
39597 #include <linux/fcntl.h>
39598 #include <linux/ptrace.h>
39599@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
39600 #endif
39601 # define START_STACK(u) ((void __user *)u.start_stack)
39602
39603+ memset(&dump, 0, sizeof(dump));
39604+
39605 fs = get_fs();
39606 set_fs(KERNEL_DS);
39607 has_dumped = 1;
39608@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
39609
39610 /* If the size of the dump file exceeds the rlimit, then see what would happen
39611 if we wrote the stack, but not the data area. */
39612+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39613 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
39614 dump.u_dsize = 0;
39615
39616 /* Make sure we have enough room to write the stack and data areas. */
39617+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39618 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
39619 dump.u_ssize = 0;
39620
39621@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39622 rlim = rlimit(RLIMIT_DATA);
39623 if (rlim >= RLIM_INFINITY)
39624 rlim = ~0;
39625+
39626+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39627 if (ex.a_data + ex.a_bss > rlim)
39628 return -ENOMEM;
39629
39630@@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39631 install_exec_creds(bprm);
39632 current->flags &= ~PF_FORKNOEXEC;
39633
39634+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39635+ current->mm->pax_flags = 0UL;
39636+#endif
39637+
39638+#ifdef CONFIG_PAX_PAGEEXEC
39639+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39640+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39641+
39642+#ifdef CONFIG_PAX_EMUTRAMP
39643+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39644+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39645+#endif
39646+
39647+#ifdef CONFIG_PAX_MPROTECT
39648+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39649+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39650+#endif
39651+
39652+ }
39653+#endif
39654+
39655 if (N_MAGIC(ex) == OMAGIC) {
39656 unsigned long text_addr, map_size;
39657 loff_t pos;
39658@@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
39659
39660 down_write(&current->mm->mmap_sem);
39661 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39662- PROT_READ | PROT_WRITE | PROT_EXEC,
39663+ PROT_READ | PROT_WRITE,
39664 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39665 fd_offset + ex.a_text);
39666 up_write(&current->mm->mmap_sem);
39667diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
39668index 21ac5ee..c1090ea 100644
39669--- a/fs/binfmt_elf.c
39670+++ b/fs/binfmt_elf.c
39671@@ -32,6 +32,7 @@
39672 #include <linux/elf.h>
39673 #include <linux/utsname.h>
39674 #include <linux/coredump.h>
39675+#include <linux/xattr.h>
39676 #include <asm/uaccess.h>
39677 #include <asm/param.h>
39678 #include <asm/page.h>
39679@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
39680 #define elf_core_dump NULL
39681 #endif
39682
39683+#ifdef CONFIG_PAX_MPROTECT
39684+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39685+#endif
39686+
39687 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39688 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39689 #else
39690@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
39691 .load_binary = load_elf_binary,
39692 .load_shlib = load_elf_library,
39693 .core_dump = elf_core_dump,
39694+
39695+#ifdef CONFIG_PAX_MPROTECT
39696+ .handle_mprotect= elf_handle_mprotect,
39697+#endif
39698+
39699 .min_coredump = ELF_EXEC_PAGESIZE,
39700 };
39701
39702@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
39703
39704 static int set_brk(unsigned long start, unsigned long end)
39705 {
39706+ unsigned long e = end;
39707+
39708 start = ELF_PAGEALIGN(start);
39709 end = ELF_PAGEALIGN(end);
39710 if (end > start) {
39711@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
39712 if (BAD_ADDR(addr))
39713 return addr;
39714 }
39715- current->mm->start_brk = current->mm->brk = end;
39716+ current->mm->start_brk = current->mm->brk = e;
39717 return 0;
39718 }
39719
39720@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39721 elf_addr_t __user *u_rand_bytes;
39722 const char *k_platform = ELF_PLATFORM;
39723 const char *k_base_platform = ELF_BASE_PLATFORM;
39724- unsigned char k_rand_bytes[16];
39725+ u32 k_rand_bytes[4];
39726 int items;
39727 elf_addr_t *elf_info;
39728 int ei_index = 0;
39729 const struct cred *cred = current_cred();
39730 struct vm_area_struct *vma;
39731+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39732
39733 /*
39734 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39735@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39736 * Generate 16 random bytes for userspace PRNG seeding.
39737 */
39738 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39739- u_rand_bytes = (elf_addr_t __user *)
39740- STACK_ALLOC(p, sizeof(k_rand_bytes));
39741+ srandom32(k_rand_bytes[0] ^ random32());
39742+ srandom32(k_rand_bytes[1] ^ random32());
39743+ srandom32(k_rand_bytes[2] ^ random32());
39744+ srandom32(k_rand_bytes[3] ^ random32());
39745+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39746+ u_rand_bytes = (elf_addr_t __user *) p;
39747 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39748 return -EFAULT;
39749
39750@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
39751 return -EFAULT;
39752 current->mm->env_end = p;
39753
39754+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39755+
39756 /* Put the elf_info on the stack in the right place. */
39757 sp = (elf_addr_t __user *)envp + 1;
39758- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39759+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39760 return -EFAULT;
39761 return 0;
39762 }
39763@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39764 {
39765 struct elf_phdr *elf_phdata;
39766 struct elf_phdr *eppnt;
39767- unsigned long load_addr = 0;
39768+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39769 int load_addr_set = 0;
39770 unsigned long last_bss = 0, elf_bss = 0;
39771- unsigned long error = ~0UL;
39772+ unsigned long error = -EINVAL;
39773 unsigned long total_size;
39774 int retval, i, size;
39775
39776@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39777 goto out_close;
39778 }
39779
39780+#ifdef CONFIG_PAX_SEGMEXEC
39781+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39782+ pax_task_size = SEGMEXEC_TASK_SIZE;
39783+#endif
39784+
39785 eppnt = elf_phdata;
39786 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39787 if (eppnt->p_type == PT_LOAD) {
39788@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
39789 k = load_addr + eppnt->p_vaddr;
39790 if (BAD_ADDR(k) ||
39791 eppnt->p_filesz > eppnt->p_memsz ||
39792- eppnt->p_memsz > TASK_SIZE ||
39793- TASK_SIZE - eppnt->p_memsz < k) {
39794+ eppnt->p_memsz > pax_task_size ||
39795+ pax_task_size - eppnt->p_memsz < k) {
39796 error = -ENOMEM;
39797 goto out_close;
39798 }
39799@@ -528,6 +552,348 @@ out:
39800 return error;
39801 }
39802
39803+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
39804+{
39805+ unsigned long pax_flags = 0UL;
39806+
39807+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39808+
39809+#ifdef CONFIG_PAX_PAGEEXEC
39810+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39811+ pax_flags |= MF_PAX_PAGEEXEC;
39812+#endif
39813+
39814+#ifdef CONFIG_PAX_SEGMEXEC
39815+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39816+ pax_flags |= MF_PAX_SEGMEXEC;
39817+#endif
39818+
39819+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39820+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39821+ if ((__supported_pte_mask & _PAGE_NX))
39822+ pax_flags &= ~MF_PAX_SEGMEXEC;
39823+ else
39824+ pax_flags &= ~MF_PAX_PAGEEXEC;
39825+ }
39826+#endif
39827+
39828+#ifdef CONFIG_PAX_EMUTRAMP
39829+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39830+ pax_flags |= MF_PAX_EMUTRAMP;
39831+#endif
39832+
39833+#ifdef CONFIG_PAX_MPROTECT
39834+ if (elf_phdata->p_flags & PF_MPROTECT)
39835+ pax_flags |= MF_PAX_MPROTECT;
39836+#endif
39837+
39838+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39839+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39840+ pax_flags |= MF_PAX_RANDMMAP;
39841+#endif
39842+
39843+#endif
39844+
39845+ return pax_flags;
39846+}
39847+
39848+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
39849+{
39850+ unsigned long pax_flags = 0UL;
39851+
39852+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39853+
39854+#ifdef CONFIG_PAX_PAGEEXEC
39855+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39856+ pax_flags |= MF_PAX_PAGEEXEC;
39857+#endif
39858+
39859+#ifdef CONFIG_PAX_SEGMEXEC
39860+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39861+ pax_flags |= MF_PAX_SEGMEXEC;
39862+#endif
39863+
39864+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39865+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39866+ if ((__supported_pte_mask & _PAGE_NX))
39867+ pax_flags &= ~MF_PAX_SEGMEXEC;
39868+ else
39869+ pax_flags &= ~MF_PAX_PAGEEXEC;
39870+ }
39871+#endif
39872+
39873+#ifdef CONFIG_PAX_EMUTRAMP
39874+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39875+ pax_flags |= MF_PAX_EMUTRAMP;
39876+#endif
39877+
39878+#ifdef CONFIG_PAX_MPROTECT
39879+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39880+ pax_flags |= MF_PAX_MPROTECT;
39881+#endif
39882+
39883+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39884+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39885+ pax_flags |= MF_PAX_RANDMMAP;
39886+#endif
39887+
39888+#endif
39889+
39890+ return pax_flags;
39891+}
39892+
39893+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39894+{
39895+ unsigned long pax_flags = 0UL;
39896+
39897+#ifdef CONFIG_PAX_EI_PAX
39898+
39899+#ifdef CONFIG_PAX_PAGEEXEC
39900+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39901+ pax_flags |= MF_PAX_PAGEEXEC;
39902+#endif
39903+
39904+#ifdef CONFIG_PAX_SEGMEXEC
39905+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39906+ pax_flags |= MF_PAX_SEGMEXEC;
39907+#endif
39908+
39909+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39910+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39911+ if ((__supported_pte_mask & _PAGE_NX))
39912+ pax_flags &= ~MF_PAX_SEGMEXEC;
39913+ else
39914+ pax_flags &= ~MF_PAX_PAGEEXEC;
39915+ }
39916+#endif
39917+
39918+#ifdef CONFIG_PAX_EMUTRAMP
39919+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39920+ pax_flags |= MF_PAX_EMUTRAMP;
39921+#endif
39922+
39923+#ifdef CONFIG_PAX_MPROTECT
39924+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39925+ pax_flags |= MF_PAX_MPROTECT;
39926+#endif
39927+
39928+#ifdef CONFIG_PAX_ASLR
39929+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39930+ pax_flags |= MF_PAX_RANDMMAP;
39931+#endif
39932+
39933+#else
39934+
39935+#ifdef CONFIG_PAX_PAGEEXEC
39936+ pax_flags |= MF_PAX_PAGEEXEC;
39937+#endif
39938+
39939+#ifdef CONFIG_PAX_MPROTECT
39940+ pax_flags |= MF_PAX_MPROTECT;
39941+#endif
39942+
39943+#ifdef CONFIG_PAX_RANDMMAP
39944+ pax_flags |= MF_PAX_RANDMMAP;
39945+#endif
39946+
39947+#ifdef CONFIG_PAX_SEGMEXEC
39948+ if (!(__supported_pte_mask & _PAGE_NX)) {
39949+ pax_flags &= ~MF_PAX_PAGEEXEC;
39950+ pax_flags |= MF_PAX_SEGMEXEC;
39951+ }
39952+#endif
39953+
39954+#endif
39955+
39956+ return pax_flags;
39957+}
39958+
39959+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39960+{
39961+
39962+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39963+ unsigned long i;
39964+
39965+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39966+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39967+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39968+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39969+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39970+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39971+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39972+ return ~0UL;
39973+
39974+#ifdef CONFIG_PAX_SOFTMODE
39975+ if (pax_softmode)
39976+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
39977+ else
39978+#endif
39979+
39980+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
39981+ break;
39982+ }
39983+#endif
39984+
39985+ return ~0UL;
39986+}
39987+
39988+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
39989+{
39990+ unsigned long pax_flags = 0UL;
39991+
39992+#ifdef CONFIG_PAX_PAGEEXEC
39993+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
39994+ pax_flags |= MF_PAX_PAGEEXEC;
39995+#endif
39996+
39997+#ifdef CONFIG_PAX_SEGMEXEC
39998+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
39999+ pax_flags |= MF_PAX_SEGMEXEC;
40000+#endif
40001+
40002+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40003+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40004+ if ((__supported_pte_mask & _PAGE_NX))
40005+ pax_flags &= ~MF_PAX_SEGMEXEC;
40006+ else
40007+ pax_flags &= ~MF_PAX_PAGEEXEC;
40008+ }
40009+#endif
40010+
40011+#ifdef CONFIG_PAX_EMUTRAMP
40012+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
40013+ pax_flags |= MF_PAX_EMUTRAMP;
40014+#endif
40015+
40016+#ifdef CONFIG_PAX_MPROTECT
40017+ if (pax_flags_softmode & MF_PAX_MPROTECT)
40018+ pax_flags |= MF_PAX_MPROTECT;
40019+#endif
40020+
40021+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40022+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
40023+ pax_flags |= MF_PAX_RANDMMAP;
40024+#endif
40025+
40026+ return pax_flags;
40027+}
40028+
40029+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
40030+{
40031+ unsigned long pax_flags = 0UL;
40032+
40033+#ifdef CONFIG_PAX_PAGEEXEC
40034+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
40035+ pax_flags |= MF_PAX_PAGEEXEC;
40036+#endif
40037+
40038+#ifdef CONFIG_PAX_SEGMEXEC
40039+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
40040+ pax_flags |= MF_PAX_SEGMEXEC;
40041+#endif
40042+
40043+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
40044+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40045+ if ((__supported_pte_mask & _PAGE_NX))
40046+ pax_flags &= ~MF_PAX_SEGMEXEC;
40047+ else
40048+ pax_flags &= ~MF_PAX_PAGEEXEC;
40049+ }
40050+#endif
40051+
40052+#ifdef CONFIG_PAX_EMUTRAMP
40053+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
40054+ pax_flags |= MF_PAX_EMUTRAMP;
40055+#endif
40056+
40057+#ifdef CONFIG_PAX_MPROTECT
40058+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
40059+ pax_flags |= MF_PAX_MPROTECT;
40060+#endif
40061+
40062+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
40063+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
40064+ pax_flags |= MF_PAX_RANDMMAP;
40065+#endif
40066+
40067+ return pax_flags;
40068+}
40069+
40070+static unsigned long pax_parse_xattr_pax(struct file * const file)
40071+{
40072+
40073+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
40074+ ssize_t xattr_size, i;
40075+ unsigned char xattr_value[5];
40076+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
40077+
40078+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
40079+ if (xattr_size <= 0)
40080+ return ~0UL;
40081+
40082+ for (i = 0; i < xattr_size; i++)
40083+ switch (xattr_value[i]) {
40084+ default:
40085+ return ~0UL;
40086+
40087+#define parse_flag(option1, option2, flag) \
40088+ case option1: \
40089+ pax_flags_hardmode |= MF_PAX_##flag; \
40090+ break; \
40091+ case option2: \
40092+ pax_flags_softmode |= MF_PAX_##flag; \
40093+ break;
40094+
40095+ parse_flag('p', 'P', PAGEEXEC);
40096+ parse_flag('e', 'E', EMUTRAMP);
40097+ parse_flag('m', 'M', MPROTECT);
40098+ parse_flag('r', 'R', RANDMMAP);
40099+ parse_flag('s', 'S', SEGMEXEC);
40100+
40101+#undef parse_flag
40102+ }
40103+
40104+ if (pax_flags_hardmode & pax_flags_softmode)
40105+ return ~0UL;
40106+
40107+#ifdef CONFIG_PAX_SOFTMODE
40108+ if (pax_softmode)
40109+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
40110+ else
40111+#endif
40112+
40113+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
40114+#else
40115+ return ~0UL;
40116+#endif
40117+}
40118+
40119+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40120+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
40121+{
40122+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
40123+
40124+ pax_flags = pax_parse_ei_pax(elf_ex);
40125+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
40126+ xattr_pax_flags = pax_parse_xattr_pax(file);
40127+
40128+ if (pt_pax_flags == ~0UL)
40129+ pt_pax_flags = xattr_pax_flags;
40130+ else if (xattr_pax_flags == ~0UL)
40131+ xattr_pax_flags = pt_pax_flags;
40132+ if (pt_pax_flags != xattr_pax_flags)
40133+ return -EINVAL;
40134+ if (pt_pax_flags != ~0UL)
40135+ pax_flags = pt_pax_flags;
40136+
40137+ if (0 > pax_check_flags(&pax_flags))
40138+ return -EINVAL;
40139+
40140+ current->mm->pax_flags = pax_flags;
40141+ return 0;
40142+}
40143+#endif
40144+
40145 /*
40146 * These are the functions used to load ELF style executables and shared
40147 * libraries. There is no binary dependent code anywhere else.
40148@@ -544,6 +910,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
40149 {
40150 unsigned int random_variable = 0;
40151
40152+#ifdef CONFIG_PAX_RANDUSTACK
40153+ if (randomize_va_space)
40154+ return stack_top - current->mm->delta_stack;
40155+#endif
40156+
40157 if ((current->flags & PF_RANDOMIZE) &&
40158 !(current->personality & ADDR_NO_RANDOMIZE)) {
40159 random_variable = get_random_int() & STACK_RND_MASK;
40160@@ -562,7 +933,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40161 unsigned long load_addr = 0, load_bias = 0;
40162 int load_addr_set = 0;
40163 char * elf_interpreter = NULL;
40164- unsigned long error;
40165+ unsigned long error = 0;
40166 struct elf_phdr *elf_ppnt, *elf_phdata;
40167 unsigned long elf_bss, elf_brk;
40168 int retval, i;
40169@@ -572,11 +943,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40170 unsigned long start_code, end_code, start_data, end_data;
40171 unsigned long reloc_func_desc __maybe_unused = 0;
40172 int executable_stack = EXSTACK_DEFAULT;
40173- unsigned long def_flags = 0;
40174 struct {
40175 struct elfhdr elf_ex;
40176 struct elfhdr interp_elf_ex;
40177 } *loc;
40178+ unsigned long pax_task_size = TASK_SIZE;
40179
40180 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
40181 if (!loc) {
40182@@ -713,11 +1084,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40183
40184 /* OK, This is the point of no return */
40185 current->flags &= ~PF_FORKNOEXEC;
40186- current->mm->def_flags = def_flags;
40187+
40188+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
40189+ current->mm->pax_flags = 0UL;
40190+#endif
40191+
40192+#ifdef CONFIG_PAX_DLRESOLVE
40193+ current->mm->call_dl_resolve = 0UL;
40194+#endif
40195+
40196+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40197+ current->mm->call_syscall = 0UL;
40198+#endif
40199+
40200+#ifdef CONFIG_PAX_ASLR
40201+ current->mm->delta_mmap = 0UL;
40202+ current->mm->delta_stack = 0UL;
40203+#endif
40204+
40205+ current->mm->def_flags = 0;
40206+
40207+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
40208+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
40209+ send_sig(SIGKILL, current, 0);
40210+ goto out_free_dentry;
40211+ }
40212+#endif
40213+
40214+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40215+ pax_set_initial_flags(bprm);
40216+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40217+ if (pax_set_initial_flags_func)
40218+ (pax_set_initial_flags_func)(bprm);
40219+#endif
40220+
40221+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40222+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
40223+ current->mm->context.user_cs_limit = PAGE_SIZE;
40224+ current->mm->def_flags |= VM_PAGEEXEC;
40225+ }
40226+#endif
40227+
40228+#ifdef CONFIG_PAX_SEGMEXEC
40229+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40230+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40231+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40232+ pax_task_size = SEGMEXEC_TASK_SIZE;
40233+ current->mm->def_flags |= VM_NOHUGEPAGE;
40234+ }
40235+#endif
40236+
40237+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40238+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40239+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40240+ put_cpu();
40241+ }
40242+#endif
40243
40244 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40245 may depend on the personality. */
40246 SET_PERSONALITY(loc->elf_ex);
40247+
40248+#ifdef CONFIG_PAX_ASLR
40249+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40250+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40251+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40252+ }
40253+#endif
40254+
40255+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40256+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40257+ executable_stack = EXSTACK_DISABLE_X;
40258+ current->personality &= ~READ_IMPLIES_EXEC;
40259+ } else
40260+#endif
40261+
40262 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40263 current->personality |= READ_IMPLIES_EXEC;
40264
40265@@ -808,6 +1249,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40266 #else
40267 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40268 #endif
40269+
40270+#ifdef CONFIG_PAX_RANDMMAP
40271+ /* PaX: randomize base address at the default exe base if requested */
40272+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40273+#ifdef CONFIG_SPARC64
40274+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40275+#else
40276+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40277+#endif
40278+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40279+ elf_flags |= MAP_FIXED;
40280+ }
40281+#endif
40282+
40283 }
40284
40285 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40286@@ -840,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40287 * allowed task size. Note that p_filesz must always be
40288 * <= p_memsz so it is only necessary to check p_memsz.
40289 */
40290- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40291- elf_ppnt->p_memsz > TASK_SIZE ||
40292- TASK_SIZE - elf_ppnt->p_memsz < k) {
40293+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40294+ elf_ppnt->p_memsz > pax_task_size ||
40295+ pax_task_size - elf_ppnt->p_memsz < k) {
40296 /* set_brk can never work. Avoid overflows. */
40297 send_sig(SIGKILL, current, 0);
40298 retval = -EINVAL;
40299@@ -870,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40300 start_data += load_bias;
40301 end_data += load_bias;
40302
40303+#ifdef CONFIG_PAX_RANDMMAP
40304+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40305+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40306+#endif
40307+
40308 /* Calling set_brk effectively mmaps the pages that we need
40309 * for the bss and break sections. We must do this before
40310 * mapping in the interpreter, to make sure it doesn't wind
40311@@ -881,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
40312 goto out_free_dentry;
40313 }
40314 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40315- send_sig(SIGSEGV, current, 0);
40316- retval = -EFAULT; /* Nobody gets to see this, but.. */
40317- goto out_free_dentry;
40318+ /*
40319+ * This bss-zeroing can fail if the ELF
40320+ * file specifies odd protections. So
40321+ * we don't check the return value
40322+ */
40323 }
40324
40325 if (elf_interpreter) {
40326@@ -1098,7 +1560,7 @@ out:
40327 * Decide what to dump of a segment, part, all or none.
40328 */
40329 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40330- unsigned long mm_flags)
40331+ unsigned long mm_flags, long signr)
40332 {
40333 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40334
40335@@ -1132,7 +1594,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
40336 if (vma->vm_file == NULL)
40337 return 0;
40338
40339- if (FILTER(MAPPED_PRIVATE))
40340+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40341 goto whole;
40342
40343 /*
40344@@ -1354,9 +1816,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
40345 {
40346 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40347 int i = 0;
40348- do
40349+ do {
40350 i += 2;
40351- while (auxv[i - 2] != AT_NULL);
40352+ } while (auxv[i - 2] != AT_NULL);
40353 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40354 }
40355
40356@@ -1862,14 +2324,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
40357 }
40358
40359 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
40360- unsigned long mm_flags)
40361+ struct coredump_params *cprm)
40362 {
40363 struct vm_area_struct *vma;
40364 size_t size = 0;
40365
40366 for (vma = first_vma(current, gate_vma); vma != NULL;
40367 vma = next_vma(vma, gate_vma))
40368- size += vma_dump_size(vma, mm_flags);
40369+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40370 return size;
40371 }
40372
40373@@ -1963,7 +2425,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40374
40375 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
40376
40377- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
40378+ offset += elf_core_vma_data_size(gate_vma, cprm);
40379 offset += elf_core_extra_data_size();
40380 e_shoff = offset;
40381
40382@@ -1977,10 +2439,12 @@ static int elf_core_dump(struct coredump_params *cprm)
40383 offset = dataoff;
40384
40385 size += sizeof(*elf);
40386+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40387 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
40388 goto end_coredump;
40389
40390 size += sizeof(*phdr4note);
40391+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40392 if (size > cprm->limit
40393 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
40394 goto end_coredump;
40395@@ -1994,7 +2458,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40396 phdr.p_offset = offset;
40397 phdr.p_vaddr = vma->vm_start;
40398 phdr.p_paddr = 0;
40399- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
40400+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40401 phdr.p_memsz = vma->vm_end - vma->vm_start;
40402 offset += phdr.p_filesz;
40403 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40404@@ -2005,6 +2469,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40405 phdr.p_align = ELF_EXEC_PAGESIZE;
40406
40407 size += sizeof(phdr);
40408+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40409 if (size > cprm->limit
40410 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
40411 goto end_coredump;
40412@@ -2029,7 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40413 unsigned long addr;
40414 unsigned long end;
40415
40416- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
40417+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
40418
40419 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40420 struct page *page;
40421@@ -2038,6 +2503,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40422 page = get_dump_page(addr);
40423 if (page) {
40424 void *kaddr = kmap(page);
40425+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40426 stop = ((size += PAGE_SIZE) > cprm->limit) ||
40427 !dump_write(cprm->file, kaddr,
40428 PAGE_SIZE);
40429@@ -2055,6 +2521,7 @@ static int elf_core_dump(struct coredump_params *cprm)
40430
40431 if (e_phnum == PN_XNUM) {
40432 size += sizeof(*shdr4extnum);
40433+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
40434 if (size > cprm->limit
40435 || !dump_write(cprm->file, shdr4extnum,
40436 sizeof(*shdr4extnum)))
40437@@ -2075,6 +2542,97 @@ out:
40438
40439 #endif /* CONFIG_ELF_CORE */
40440
40441+#ifdef CONFIG_PAX_MPROTECT
40442+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40443+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40444+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40445+ *
40446+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40447+ * basis because we want to allow the common case and not the special ones.
40448+ */
40449+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40450+{
40451+ struct elfhdr elf_h;
40452+ struct elf_phdr elf_p;
40453+ unsigned long i;
40454+ unsigned long oldflags;
40455+ bool is_textrel_rw, is_textrel_rx, is_relro;
40456+
40457+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40458+ return;
40459+
40460+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40461+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40462+
40463+#ifdef CONFIG_PAX_ELFRELOCS
40464+ /* possible TEXTREL */
40465+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40466+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40467+#else
40468+ is_textrel_rw = false;
40469+ is_textrel_rx = false;
40470+#endif
40471+
40472+ /* possible RELRO */
40473+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40474+
40475+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40476+ return;
40477+
40478+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40479+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40480+
40481+#ifdef CONFIG_PAX_ETEXECRELOCS
40482+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40483+#else
40484+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40485+#endif
40486+
40487+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40488+ !elf_check_arch(&elf_h) ||
40489+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40490+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40491+ return;
40492+
40493+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40494+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40495+ return;
40496+ switch (elf_p.p_type) {
40497+ case PT_DYNAMIC:
40498+ if (!is_textrel_rw && !is_textrel_rx)
40499+ continue;
40500+ i = 0UL;
40501+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40502+ elf_dyn dyn;
40503+
40504+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40505+ return;
40506+ if (dyn.d_tag == DT_NULL)
40507+ return;
40508+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40509+ gr_log_textrel(vma);
40510+ if (is_textrel_rw)
40511+ vma->vm_flags |= VM_MAYWRITE;
40512+ else
40513+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40514+ vma->vm_flags &= ~VM_MAYWRITE;
40515+ return;
40516+ }
40517+ i++;
40518+ }
40519+ return;
40520+
40521+ case PT_GNU_RELRO:
40522+ if (!is_relro)
40523+ continue;
40524+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40525+ vma->vm_flags &= ~VM_MAYWRITE;
40526+ return;
40527+ }
40528+ }
40529+}
40530+#endif
40531+
40532 static int __init init_elf_binfmt(void)
40533 {
40534 return register_binfmt(&elf_format);
40535diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
40536index 1bffbe0..c8c283e 100644
40537--- a/fs/binfmt_flat.c
40538+++ b/fs/binfmt_flat.c
40539@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
40540 realdatastart = (unsigned long) -ENOMEM;
40541 printk("Unable to allocate RAM for process data, errno %d\n",
40542 (int)-realdatastart);
40543+ down_write(&current->mm->mmap_sem);
40544 do_munmap(current->mm, textpos, text_len);
40545+ up_write(&current->mm->mmap_sem);
40546 ret = realdatastart;
40547 goto err;
40548 }
40549@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40550 }
40551 if (IS_ERR_VALUE(result)) {
40552 printk("Unable to read data+bss, errno %d\n", (int)-result);
40553+ down_write(&current->mm->mmap_sem);
40554 do_munmap(current->mm, textpos, text_len);
40555 do_munmap(current->mm, realdatastart, len);
40556+ up_write(&current->mm->mmap_sem);
40557 ret = result;
40558 goto err;
40559 }
40560@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
40561 }
40562 if (IS_ERR_VALUE(result)) {
40563 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40564+ down_write(&current->mm->mmap_sem);
40565 do_munmap(current->mm, textpos, text_len + data_len + extra +
40566 MAX_SHARED_LIBS * sizeof(unsigned long));
40567+ up_write(&current->mm->mmap_sem);
40568 ret = result;
40569 goto err;
40570 }
40571diff --git a/fs/bio.c b/fs/bio.c
40572index b1fe82c..84da0a9 100644
40573--- a/fs/bio.c
40574+++ b/fs/bio.c
40575@@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
40576 const int read = bio_data_dir(bio) == READ;
40577 struct bio_map_data *bmd = bio->bi_private;
40578 int i;
40579- char *p = bmd->sgvecs[0].iov_base;
40580+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
40581
40582 __bio_for_each_segment(bvec, bio, i, 0) {
40583 char *addr = page_address(bvec->bv_page);
40584diff --git a/fs/block_dev.c b/fs/block_dev.c
40585index b07f1da..9efcb92 100644
40586--- a/fs/block_dev.c
40587+++ b/fs/block_dev.c
40588@@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
40589 else if (bdev->bd_contains == bdev)
40590 return true; /* is a whole device which isn't held */
40591
40592- else if (whole->bd_holder == bd_may_claim)
40593+ else if (whole->bd_holder == (void *)bd_may_claim)
40594 return true; /* is a partition of a device that is being partitioned */
40595 else if (whole->bd_holder != NULL)
40596 return false; /* is a partition of a held device */
40597diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
40598index dede441..f2a2507 100644
40599--- a/fs/btrfs/ctree.c
40600+++ b/fs/btrfs/ctree.c
40601@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
40602 free_extent_buffer(buf);
40603 add_root_to_dirty_list(root);
40604 } else {
40605- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40606- parent_start = parent->start;
40607- else
40608+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40609+ if (parent)
40610+ parent_start = parent->start;
40611+ else
40612+ parent_start = 0;
40613+ } else
40614 parent_start = 0;
40615
40616 WARN_ON(trans->transid != btrfs_header_generation(parent));
40617diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
40618index fd1a06d..6e9033d 100644
40619--- a/fs/btrfs/inode.c
40620+++ b/fs/btrfs/inode.c
40621@@ -6895,7 +6895,7 @@ fail:
40622 return -ENOMEM;
40623 }
40624
40625-static int btrfs_getattr(struct vfsmount *mnt,
40626+int btrfs_getattr(struct vfsmount *mnt,
40627 struct dentry *dentry, struct kstat *stat)
40628 {
40629 struct inode *inode = dentry->d_inode;
40630@@ -6909,6 +6909,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
40631 return 0;
40632 }
40633
40634+EXPORT_SYMBOL(btrfs_getattr);
40635+
40636+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40637+{
40638+ return BTRFS_I(inode)->root->anon_dev;
40639+}
40640+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40641+
40642 /*
40643 * If a file is moved, it will inherit the cow and compression flags of the new
40644 * directory.
40645diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
40646index c04f02c..f5c9e2e 100644
40647--- a/fs/btrfs/ioctl.c
40648+++ b/fs/btrfs/ioctl.c
40649@@ -2733,9 +2733,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40650 for (i = 0; i < num_types; i++) {
40651 struct btrfs_space_info *tmp;
40652
40653+ /* Don't copy in more than we allocated */
40654 if (!slot_count)
40655 break;
40656
40657+ slot_count--;
40658+
40659 info = NULL;
40660 rcu_read_lock();
40661 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
40662@@ -2757,15 +2760,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
40663 memcpy(dest, &space, sizeof(space));
40664 dest++;
40665 space_args.total_spaces++;
40666- slot_count--;
40667 }
40668- if (!slot_count)
40669- break;
40670 }
40671 up_read(&info->groups_sem);
40672 }
40673
40674- user_dest = (struct btrfs_ioctl_space_info *)
40675+ user_dest = (struct btrfs_ioctl_space_info __user *)
40676 (arg + sizeof(struct btrfs_ioctl_space_args));
40677
40678 if (copy_to_user(user_dest, dest_orig, alloc_size))
40679diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
40680index cfb5543..1ae7347 100644
40681--- a/fs/btrfs/relocation.c
40682+++ b/fs/btrfs/relocation.c
40683@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
40684 }
40685 spin_unlock(&rc->reloc_root_tree.lock);
40686
40687- BUG_ON((struct btrfs_root *)node->data != root);
40688+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40689
40690 if (!del) {
40691 spin_lock(&rc->reloc_root_tree.lock);
40692diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
40693index 622f469..e8d2d55 100644
40694--- a/fs/cachefiles/bind.c
40695+++ b/fs/cachefiles/bind.c
40696@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
40697 args);
40698
40699 /* start by checking things over */
40700- ASSERT(cache->fstop_percent >= 0 &&
40701- cache->fstop_percent < cache->fcull_percent &&
40702+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40703 cache->fcull_percent < cache->frun_percent &&
40704 cache->frun_percent < 100);
40705
40706- ASSERT(cache->bstop_percent >= 0 &&
40707- cache->bstop_percent < cache->bcull_percent &&
40708+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40709 cache->bcull_percent < cache->brun_percent &&
40710 cache->brun_percent < 100);
40711
40712diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
40713index 0a1467b..6a53245 100644
40714--- a/fs/cachefiles/daemon.c
40715+++ b/fs/cachefiles/daemon.c
40716@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
40717 if (n > buflen)
40718 return -EMSGSIZE;
40719
40720- if (copy_to_user(_buffer, buffer, n) != 0)
40721+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
40722 return -EFAULT;
40723
40724 return n;
40725@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
40726 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40727 return -EIO;
40728
40729- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40730+ if (datalen > PAGE_SIZE - 1)
40731 return -EOPNOTSUPP;
40732
40733 /* drag the command string into the kernel so we can parse it */
40734@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
40735 if (args[0] != '%' || args[1] != '\0')
40736 return -EINVAL;
40737
40738- if (fstop < 0 || fstop >= cache->fcull_percent)
40739+ if (fstop >= cache->fcull_percent)
40740 return cachefiles_daemon_range_error(cache, args);
40741
40742 cache->fstop_percent = fstop;
40743@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
40744 if (args[0] != '%' || args[1] != '\0')
40745 return -EINVAL;
40746
40747- if (bstop < 0 || bstop >= cache->bcull_percent)
40748+ if (bstop >= cache->bcull_percent)
40749 return cachefiles_daemon_range_error(cache, args);
40750
40751 cache->bstop_percent = bstop;
40752diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
40753index bd6bc1b..b627b53 100644
40754--- a/fs/cachefiles/internal.h
40755+++ b/fs/cachefiles/internal.h
40756@@ -57,7 +57,7 @@ struct cachefiles_cache {
40757 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40758 struct rb_root active_nodes; /* active nodes (can't be culled) */
40759 rwlock_t active_lock; /* lock for active_nodes */
40760- atomic_t gravecounter; /* graveyard uniquifier */
40761+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40762 unsigned frun_percent; /* when to stop culling (% files) */
40763 unsigned fcull_percent; /* when to start culling (% files) */
40764 unsigned fstop_percent; /* when to stop allocating (% files) */
40765@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
40766 * proc.c
40767 */
40768 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40769-extern atomic_t cachefiles_lookup_histogram[HZ];
40770-extern atomic_t cachefiles_mkdir_histogram[HZ];
40771-extern atomic_t cachefiles_create_histogram[HZ];
40772+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40773+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40774+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40775
40776 extern int __init cachefiles_proc_init(void);
40777 extern void cachefiles_proc_cleanup(void);
40778 static inline
40779-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40780+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40781 {
40782 unsigned long jif = jiffies - start_jif;
40783 if (jif >= HZ)
40784 jif = HZ - 1;
40785- atomic_inc(&histogram[jif]);
40786+ atomic_inc_unchecked(&histogram[jif]);
40787 }
40788
40789 #else
40790diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
40791index a0358c2..d6137f2 100644
40792--- a/fs/cachefiles/namei.c
40793+++ b/fs/cachefiles/namei.c
40794@@ -318,7 +318,7 @@ try_again:
40795 /* first step is to make up a grave dentry in the graveyard */
40796 sprintf(nbuffer, "%08x%08x",
40797 (uint32_t) get_seconds(),
40798- (uint32_t) atomic_inc_return(&cache->gravecounter));
40799+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40800
40801 /* do the multiway lock magic */
40802 trap = lock_rename(cache->graveyard, dir);
40803diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
40804index eccd339..4c1d995 100644
40805--- a/fs/cachefiles/proc.c
40806+++ b/fs/cachefiles/proc.c
40807@@ -14,9 +14,9 @@
40808 #include <linux/seq_file.h>
40809 #include "internal.h"
40810
40811-atomic_t cachefiles_lookup_histogram[HZ];
40812-atomic_t cachefiles_mkdir_histogram[HZ];
40813-atomic_t cachefiles_create_histogram[HZ];
40814+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40815+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40816+atomic_unchecked_t cachefiles_create_histogram[HZ];
40817
40818 /*
40819 * display the latency histogram
40820@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
40821 return 0;
40822 default:
40823 index = (unsigned long) v - 3;
40824- x = atomic_read(&cachefiles_lookup_histogram[index]);
40825- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40826- z = atomic_read(&cachefiles_create_histogram[index]);
40827+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40828+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40829+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40830 if (x == 0 && y == 0 && z == 0)
40831 return 0;
40832
40833diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
40834index 0e3c092..818480e 100644
40835--- a/fs/cachefiles/rdwr.c
40836+++ b/fs/cachefiles/rdwr.c
40837@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
40838 old_fs = get_fs();
40839 set_fs(KERNEL_DS);
40840 ret = file->f_op->write(
40841- file, (const void __user *) data, len, &pos);
40842+ file, (const void __force_user *) data, len, &pos);
40843 set_fs(old_fs);
40844 kunmap(page);
40845 if (ret != len)
40846diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
40847index 9895400..fa40a7d 100644
40848--- a/fs/ceph/dir.c
40849+++ b/fs/ceph/dir.c
40850@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
40851 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
40852 struct ceph_mds_client *mdsc = fsc->mdsc;
40853 unsigned frag = fpos_frag(filp->f_pos);
40854- int off = fpos_off(filp->f_pos);
40855+ unsigned int off = fpos_off(filp->f_pos);
40856 int err;
40857 u32 ftype;
40858 struct ceph_mds_reply_info_parsed *rinfo;
40859diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
40860index 84e8c07..6170d31 100644
40861--- a/fs/cifs/cifs_debug.c
40862+++ b/fs/cifs/cifs_debug.c
40863@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40864
40865 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
40866 #ifdef CONFIG_CIFS_STATS2
40867- atomic_set(&totBufAllocCount, 0);
40868- atomic_set(&totSmBufAllocCount, 0);
40869+ atomic_set_unchecked(&totBufAllocCount, 0);
40870+ atomic_set_unchecked(&totSmBufAllocCount, 0);
40871 #endif /* CONFIG_CIFS_STATS2 */
40872 spin_lock(&cifs_tcp_ses_lock);
40873 list_for_each(tmp1, &cifs_tcp_ses_list) {
40874@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
40875 tcon = list_entry(tmp3,
40876 struct cifs_tcon,
40877 tcon_list);
40878- atomic_set(&tcon->num_smbs_sent, 0);
40879- atomic_set(&tcon->num_writes, 0);
40880- atomic_set(&tcon->num_reads, 0);
40881- atomic_set(&tcon->num_oplock_brks, 0);
40882- atomic_set(&tcon->num_opens, 0);
40883- atomic_set(&tcon->num_posixopens, 0);
40884- atomic_set(&tcon->num_posixmkdirs, 0);
40885- atomic_set(&tcon->num_closes, 0);
40886- atomic_set(&tcon->num_deletes, 0);
40887- atomic_set(&tcon->num_mkdirs, 0);
40888- atomic_set(&tcon->num_rmdirs, 0);
40889- atomic_set(&tcon->num_renames, 0);
40890- atomic_set(&tcon->num_t2renames, 0);
40891- atomic_set(&tcon->num_ffirst, 0);
40892- atomic_set(&tcon->num_fnext, 0);
40893- atomic_set(&tcon->num_fclose, 0);
40894- atomic_set(&tcon->num_hardlinks, 0);
40895- atomic_set(&tcon->num_symlinks, 0);
40896- atomic_set(&tcon->num_locks, 0);
40897+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40898+ atomic_set_unchecked(&tcon->num_writes, 0);
40899+ atomic_set_unchecked(&tcon->num_reads, 0);
40900+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40901+ atomic_set_unchecked(&tcon->num_opens, 0);
40902+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40903+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40904+ atomic_set_unchecked(&tcon->num_closes, 0);
40905+ atomic_set_unchecked(&tcon->num_deletes, 0);
40906+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40907+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40908+ atomic_set_unchecked(&tcon->num_renames, 0);
40909+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40910+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40911+ atomic_set_unchecked(&tcon->num_fnext, 0);
40912+ atomic_set_unchecked(&tcon->num_fclose, 0);
40913+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40914+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40915+ atomic_set_unchecked(&tcon->num_locks, 0);
40916 }
40917 }
40918 }
40919@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40920 smBufAllocCount.counter, cifs_min_small);
40921 #ifdef CONFIG_CIFS_STATS2
40922 seq_printf(m, "Total Large %d Small %d Allocations\n",
40923- atomic_read(&totBufAllocCount),
40924- atomic_read(&totSmBufAllocCount));
40925+ atomic_read_unchecked(&totBufAllocCount),
40926+ atomic_read_unchecked(&totSmBufAllocCount));
40927 #endif /* CONFIG_CIFS_STATS2 */
40928
40929 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
40930@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
40931 if (tcon->need_reconnect)
40932 seq_puts(m, "\tDISCONNECTED ");
40933 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40934- atomic_read(&tcon->num_smbs_sent),
40935- atomic_read(&tcon->num_oplock_brks));
40936+ atomic_read_unchecked(&tcon->num_smbs_sent),
40937+ atomic_read_unchecked(&tcon->num_oplock_brks));
40938 seq_printf(m, "\nReads: %d Bytes: %lld",
40939- atomic_read(&tcon->num_reads),
40940+ atomic_read_unchecked(&tcon->num_reads),
40941 (long long)(tcon->bytes_read));
40942 seq_printf(m, "\nWrites: %d Bytes: %lld",
40943- atomic_read(&tcon->num_writes),
40944+ atomic_read_unchecked(&tcon->num_writes),
40945 (long long)(tcon->bytes_written));
40946 seq_printf(m, "\nFlushes: %d",
40947- atomic_read(&tcon->num_flushes));
40948+ atomic_read_unchecked(&tcon->num_flushes));
40949 seq_printf(m, "\nLocks: %d HardLinks: %d "
40950 "Symlinks: %d",
40951- atomic_read(&tcon->num_locks),
40952- atomic_read(&tcon->num_hardlinks),
40953- atomic_read(&tcon->num_symlinks));
40954+ atomic_read_unchecked(&tcon->num_locks),
40955+ atomic_read_unchecked(&tcon->num_hardlinks),
40956+ atomic_read_unchecked(&tcon->num_symlinks));
40957 seq_printf(m, "\nOpens: %d Closes: %d "
40958 "Deletes: %d",
40959- atomic_read(&tcon->num_opens),
40960- atomic_read(&tcon->num_closes),
40961- atomic_read(&tcon->num_deletes));
40962+ atomic_read_unchecked(&tcon->num_opens),
40963+ atomic_read_unchecked(&tcon->num_closes),
40964+ atomic_read_unchecked(&tcon->num_deletes));
40965 seq_printf(m, "\nPosix Opens: %d "
40966 "Posix Mkdirs: %d",
40967- atomic_read(&tcon->num_posixopens),
40968- atomic_read(&tcon->num_posixmkdirs));
40969+ atomic_read_unchecked(&tcon->num_posixopens),
40970+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40971 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40972- atomic_read(&tcon->num_mkdirs),
40973- atomic_read(&tcon->num_rmdirs));
40974+ atomic_read_unchecked(&tcon->num_mkdirs),
40975+ atomic_read_unchecked(&tcon->num_rmdirs));
40976 seq_printf(m, "\nRenames: %d T2 Renames %d",
40977- atomic_read(&tcon->num_renames),
40978- atomic_read(&tcon->num_t2renames));
40979+ atomic_read_unchecked(&tcon->num_renames),
40980+ atomic_read_unchecked(&tcon->num_t2renames));
40981 seq_printf(m, "\nFindFirst: %d FNext %d "
40982 "FClose %d",
40983- atomic_read(&tcon->num_ffirst),
40984- atomic_read(&tcon->num_fnext),
40985- atomic_read(&tcon->num_fclose));
40986+ atomic_read_unchecked(&tcon->num_ffirst),
40987+ atomic_read_unchecked(&tcon->num_fnext),
40988+ atomic_read_unchecked(&tcon->num_fclose));
40989 }
40990 }
40991 }
40992diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
40993index 8f1fe32..38f9e27 100644
40994--- a/fs/cifs/cifsfs.c
40995+++ b/fs/cifs/cifsfs.c
40996@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
40997 cifs_req_cachep = kmem_cache_create("cifs_request",
40998 CIFSMaxBufSize +
40999 MAX_CIFS_HDR_SIZE, 0,
41000- SLAB_HWCACHE_ALIGN, NULL);
41001+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
41002 if (cifs_req_cachep == NULL)
41003 return -ENOMEM;
41004
41005@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
41006 efficient to alloc 1 per page off the slab compared to 17K (5page)
41007 alloc of large cifs buffers even when page debugging is on */
41008 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
41009- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
41010+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
41011 NULL);
41012 if (cifs_sm_req_cachep == NULL) {
41013 mempool_destroy(cifs_req_poolp);
41014@@ -1101,8 +1101,8 @@ init_cifs(void)
41015 atomic_set(&bufAllocCount, 0);
41016 atomic_set(&smBufAllocCount, 0);
41017 #ifdef CONFIG_CIFS_STATS2
41018- atomic_set(&totBufAllocCount, 0);
41019- atomic_set(&totSmBufAllocCount, 0);
41020+ atomic_set_unchecked(&totBufAllocCount, 0);
41021+ atomic_set_unchecked(&totSmBufAllocCount, 0);
41022 #endif /* CONFIG_CIFS_STATS2 */
41023
41024 atomic_set(&midCount, 0);
41025diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
41026index 8238aa1..0347196 100644
41027--- a/fs/cifs/cifsglob.h
41028+++ b/fs/cifs/cifsglob.h
41029@@ -392,28 +392,28 @@ struct cifs_tcon {
41030 __u16 Flags; /* optional support bits */
41031 enum statusEnum tidStatus;
41032 #ifdef CONFIG_CIFS_STATS
41033- atomic_t num_smbs_sent;
41034- atomic_t num_writes;
41035- atomic_t num_reads;
41036- atomic_t num_flushes;
41037- atomic_t num_oplock_brks;
41038- atomic_t num_opens;
41039- atomic_t num_closes;
41040- atomic_t num_deletes;
41041- atomic_t num_mkdirs;
41042- atomic_t num_posixopens;
41043- atomic_t num_posixmkdirs;
41044- atomic_t num_rmdirs;
41045- atomic_t num_renames;
41046- atomic_t num_t2renames;
41047- atomic_t num_ffirst;
41048- atomic_t num_fnext;
41049- atomic_t num_fclose;
41050- atomic_t num_hardlinks;
41051- atomic_t num_symlinks;
41052- atomic_t num_locks;
41053- atomic_t num_acl_get;
41054- atomic_t num_acl_set;
41055+ atomic_unchecked_t num_smbs_sent;
41056+ atomic_unchecked_t num_writes;
41057+ atomic_unchecked_t num_reads;
41058+ atomic_unchecked_t num_flushes;
41059+ atomic_unchecked_t num_oplock_brks;
41060+ atomic_unchecked_t num_opens;
41061+ atomic_unchecked_t num_closes;
41062+ atomic_unchecked_t num_deletes;
41063+ atomic_unchecked_t num_mkdirs;
41064+ atomic_unchecked_t num_posixopens;
41065+ atomic_unchecked_t num_posixmkdirs;
41066+ atomic_unchecked_t num_rmdirs;
41067+ atomic_unchecked_t num_renames;
41068+ atomic_unchecked_t num_t2renames;
41069+ atomic_unchecked_t num_ffirst;
41070+ atomic_unchecked_t num_fnext;
41071+ atomic_unchecked_t num_fclose;
41072+ atomic_unchecked_t num_hardlinks;
41073+ atomic_unchecked_t num_symlinks;
41074+ atomic_unchecked_t num_locks;
41075+ atomic_unchecked_t num_acl_get;
41076+ atomic_unchecked_t num_acl_set;
41077 #ifdef CONFIG_CIFS_STATS2
41078 unsigned long long time_writes;
41079 unsigned long long time_reads;
41080@@ -628,7 +628,7 @@ convert_delimiter(char *path, char delim)
41081 }
41082
41083 #ifdef CONFIG_CIFS_STATS
41084-#define cifs_stats_inc atomic_inc
41085+#define cifs_stats_inc atomic_inc_unchecked
41086
41087 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
41088 unsigned int bytes)
41089@@ -985,8 +985,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
41090 /* Various Debug counters */
41091 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
41092 #ifdef CONFIG_CIFS_STATS2
41093-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
41094-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
41095+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
41096+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
41097 #endif
41098 GLOBAL_EXTERN atomic_t smBufAllocCount;
41099 GLOBAL_EXTERN atomic_t midCount;
41100diff --git a/fs/cifs/link.c b/fs/cifs/link.c
41101index 6b0e064..94e6c3c 100644
41102--- a/fs/cifs/link.c
41103+++ b/fs/cifs/link.c
41104@@ -600,7 +600,7 @@ symlink_exit:
41105
41106 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
41107 {
41108- char *p = nd_get_link(nd);
41109+ const char *p = nd_get_link(nd);
41110 if (!IS_ERR(p))
41111 kfree(p);
41112 }
41113diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
41114index 703ef5c..2a44ed5 100644
41115--- a/fs/cifs/misc.c
41116+++ b/fs/cifs/misc.c
41117@@ -156,7 +156,7 @@ cifs_buf_get(void)
41118 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
41119 atomic_inc(&bufAllocCount);
41120 #ifdef CONFIG_CIFS_STATS2
41121- atomic_inc(&totBufAllocCount);
41122+ atomic_inc_unchecked(&totBufAllocCount);
41123 #endif /* CONFIG_CIFS_STATS2 */
41124 }
41125
41126@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
41127 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
41128 atomic_inc(&smBufAllocCount);
41129 #ifdef CONFIG_CIFS_STATS2
41130- atomic_inc(&totSmBufAllocCount);
41131+ atomic_inc_unchecked(&totSmBufAllocCount);
41132 #endif /* CONFIG_CIFS_STATS2 */
41133
41134 }
41135diff --git a/fs/coda/cache.c b/fs/coda/cache.c
41136index 6901578..d402eb5 100644
41137--- a/fs/coda/cache.c
41138+++ b/fs/coda/cache.c
41139@@ -24,7 +24,7 @@
41140 #include "coda_linux.h"
41141 #include "coda_cache.h"
41142
41143-static atomic_t permission_epoch = ATOMIC_INIT(0);
41144+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
41145
41146 /* replace or extend an acl cache hit */
41147 void coda_cache_enter(struct inode *inode, int mask)
41148@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
41149 struct coda_inode_info *cii = ITOC(inode);
41150
41151 spin_lock(&cii->c_lock);
41152- cii->c_cached_epoch = atomic_read(&permission_epoch);
41153+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
41154 if (cii->c_uid != current_fsuid()) {
41155 cii->c_uid = current_fsuid();
41156 cii->c_cached_perm = mask;
41157@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
41158 {
41159 struct coda_inode_info *cii = ITOC(inode);
41160 spin_lock(&cii->c_lock);
41161- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
41162+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
41163 spin_unlock(&cii->c_lock);
41164 }
41165
41166 /* remove all acl caches */
41167 void coda_cache_clear_all(struct super_block *sb)
41168 {
41169- atomic_inc(&permission_epoch);
41170+ atomic_inc_unchecked(&permission_epoch);
41171 }
41172
41173
41174@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
41175 spin_lock(&cii->c_lock);
41176 hit = (mask & cii->c_cached_perm) == mask &&
41177 cii->c_uid == current_fsuid() &&
41178- cii->c_cached_epoch == atomic_read(&permission_epoch);
41179+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
41180 spin_unlock(&cii->c_lock);
41181
41182 return hit;
41183diff --git a/fs/compat.c b/fs/compat.c
41184index c987875..08771ca 100644
41185--- a/fs/compat.c
41186+++ b/fs/compat.c
41187@@ -132,8 +132,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
41188 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
41189 {
41190 compat_ino_t ino = stat->ino;
41191- typeof(ubuf->st_uid) uid = 0;
41192- typeof(ubuf->st_gid) gid = 0;
41193+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
41194+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
41195 int err;
41196
41197 SET_UID(uid, stat->uid);
41198@@ -504,7 +504,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
41199
41200 set_fs(KERNEL_DS);
41201 /* The __user pointer cast is valid because of the set_fs() */
41202- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
41203+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
41204 set_fs(oldfs);
41205 /* truncating is ok because it's a user address */
41206 if (!ret)
41207@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
41208 goto out;
41209
41210 ret = -EINVAL;
41211- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
41212+ if (nr_segs > UIO_MAXIOV)
41213 goto out;
41214 if (nr_segs > fast_segs) {
41215 ret = -ENOMEM;
41216@@ -845,6 +845,7 @@ struct compat_old_linux_dirent {
41217
41218 struct compat_readdir_callback {
41219 struct compat_old_linux_dirent __user *dirent;
41220+ struct file * file;
41221 int result;
41222 };
41223
41224@@ -862,6 +863,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
41225 buf->result = -EOVERFLOW;
41226 return -EOVERFLOW;
41227 }
41228+
41229+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41230+ return 0;
41231+
41232 buf->result++;
41233 dirent = buf->dirent;
41234 if (!access_ok(VERIFY_WRITE, dirent,
41235@@ -894,6 +899,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
41236
41237 buf.result = 0;
41238 buf.dirent = dirent;
41239+ buf.file = file;
41240
41241 error = vfs_readdir(file, compat_fillonedir, &buf);
41242 if (buf.result)
41243@@ -914,6 +920,7 @@ struct compat_linux_dirent {
41244 struct compat_getdents_callback {
41245 struct compat_linux_dirent __user *current_dir;
41246 struct compat_linux_dirent __user *previous;
41247+ struct file * file;
41248 int count;
41249 int error;
41250 };
41251@@ -935,6 +942,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
41252 buf->error = -EOVERFLOW;
41253 return -EOVERFLOW;
41254 }
41255+
41256+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41257+ return 0;
41258+
41259 dirent = buf->previous;
41260 if (dirent) {
41261 if (__put_user(offset, &dirent->d_off))
41262@@ -982,6 +993,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
41263 buf.previous = NULL;
41264 buf.count = count;
41265 buf.error = 0;
41266+ buf.file = file;
41267
41268 error = vfs_readdir(file, compat_filldir, &buf);
41269 if (error >= 0)
41270@@ -1003,6 +1015,7 @@ out:
41271 struct compat_getdents_callback64 {
41272 struct linux_dirent64 __user *current_dir;
41273 struct linux_dirent64 __user *previous;
41274+ struct file * file;
41275 int count;
41276 int error;
41277 };
41278@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
41279 buf->error = -EINVAL; /* only used if we fail.. */
41280 if (reclen > buf->count)
41281 return -EINVAL;
41282+
41283+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41284+ return 0;
41285+
41286 dirent = buf->previous;
41287
41288 if (dirent) {
41289@@ -1070,13 +1087,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
41290 buf.previous = NULL;
41291 buf.count = count;
41292 buf.error = 0;
41293+ buf.file = file;
41294
41295 error = vfs_readdir(file, compat_filldir64, &buf);
41296 if (error >= 0)
41297 error = buf.error;
41298 lastdirent = buf.previous;
41299 if (lastdirent) {
41300- typeof(lastdirent->d_off) d_off = file->f_pos;
41301+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
41302 if (__put_user_unaligned(d_off, &lastdirent->d_off))
41303 error = -EFAULT;
41304 else
41305diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
41306index 112e45a..b59845b 100644
41307--- a/fs/compat_binfmt_elf.c
41308+++ b/fs/compat_binfmt_elf.c
41309@@ -30,11 +30,13 @@
41310 #undef elf_phdr
41311 #undef elf_shdr
41312 #undef elf_note
41313+#undef elf_dyn
41314 #undef elf_addr_t
41315 #define elfhdr elf32_hdr
41316 #define elf_phdr elf32_phdr
41317 #define elf_shdr elf32_shdr
41318 #define elf_note elf32_note
41319+#define elf_dyn Elf32_Dyn
41320 #define elf_addr_t Elf32_Addr
41321
41322 /*
41323diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
41324index 51352de..93292ff 100644
41325--- a/fs/compat_ioctl.c
41326+++ b/fs/compat_ioctl.c
41327@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
41328
41329 err = get_user(palp, &up->palette);
41330 err |= get_user(length, &up->length);
41331+ if (err)
41332+ return -EFAULT;
41333
41334 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41335 err = put_user(compat_ptr(palp), &up_native->palette);
41336@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
41337 return -EFAULT;
41338 if (__get_user(udata, &ss32->iomem_base))
41339 return -EFAULT;
41340- ss.iomem_base = compat_ptr(udata);
41341+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
41342 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
41343 __get_user(ss.port_high, &ss32->port_high))
41344 return -EFAULT;
41345@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
41346 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
41347 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
41348 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
41349- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41350+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
41351 return -EFAULT;
41352
41353 return ioctl_preallocate(file, p);
41354@@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
41355 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
41356 {
41357 unsigned int a, b;
41358- a = *(unsigned int *)p;
41359- b = *(unsigned int *)q;
41360+ a = *(const unsigned int *)p;
41361+ b = *(const unsigned int *)q;
41362 if (a > b)
41363 return 1;
41364 if (a < b)
41365diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
41366index 9a37a9b..35792b6 100644
41367--- a/fs/configfs/dir.c
41368+++ b/fs/configfs/dir.c
41369@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41370 }
41371 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41372 struct configfs_dirent *next;
41373- const char * name;
41374+ const unsigned char * name;
41375+ char d_name[sizeof(next->s_dentry->d_iname)];
41376 int len;
41377 struct inode *inode = NULL;
41378
41379@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
41380 continue;
41381
41382 name = configfs_get_name(next);
41383- len = strlen(name);
41384+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41385+ len = next->s_dentry->d_name.len;
41386+ memcpy(d_name, name, len);
41387+ name = d_name;
41388+ } else
41389+ len = strlen(name);
41390
41391 /*
41392 * We'll have a dentry and an inode for
41393diff --git a/fs/dcache.c b/fs/dcache.c
41394index f7908ae..920a680 100644
41395--- a/fs/dcache.c
41396+++ b/fs/dcache.c
41397@@ -3042,7 +3042,7 @@ void __init vfs_caches_init(unsigned long mempages)
41398 mempages -= reserve;
41399
41400 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41401- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41402+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41403
41404 dcache_init();
41405 inode_init();
41406diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
41407index f3a257d..715ac0f 100644
41408--- a/fs/debugfs/inode.c
41409+++ b/fs/debugfs/inode.c
41410@@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
41411 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
41412 {
41413 return debugfs_create_file(name,
41414+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41415+ S_IFDIR | S_IRWXU,
41416+#else
41417 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41418+#endif
41419 parent, NULL, NULL);
41420 }
41421 EXPORT_SYMBOL_GPL(debugfs_create_dir);
41422diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
41423index 2a83425..b082cec 100644
41424--- a/fs/ecryptfs/crypto.c
41425+++ b/fs/ecryptfs/crypto.c
41426@@ -417,17 +417,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
41427 (unsigned long long)(extent_base + extent_offset), rc);
41428 goto out;
41429 }
41430- if (unlikely(ecryptfs_verbosity > 0)) {
41431- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
41432- "with iv:\n");
41433- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
41434- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
41435- "encryption:\n");
41436- ecryptfs_dump_hex((char *)
41437- (page_address(page)
41438- + (extent_offset * crypt_stat->extent_size)),
41439- 8);
41440- }
41441 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
41442 page, (extent_offset
41443 * crypt_stat->extent_size),
41444@@ -440,14 +429,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
41445 goto out;
41446 }
41447 rc = 0;
41448- if (unlikely(ecryptfs_verbosity > 0)) {
41449- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; "
41450- "rc = [%d]\n",
41451- (unsigned long long)(extent_base + extent_offset), rc);
41452- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
41453- "encryption:\n");
41454- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
41455- }
41456 out:
41457 return rc;
41458 }
41459@@ -543,17 +524,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
41460 (unsigned long long)(extent_base + extent_offset), rc);
41461 goto out;
41462 }
41463- if (unlikely(ecryptfs_verbosity > 0)) {
41464- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
41465- "with iv:\n");
41466- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
41467- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
41468- "decryption:\n");
41469- ecryptfs_dump_hex((char *)
41470- (page_address(enc_extent_page)
41471- + (extent_offset * crypt_stat->extent_size)),
41472- 8);
41473- }
41474 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
41475 (extent_offset
41476 * crypt_stat->extent_size),
41477@@ -567,16 +537,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
41478 goto out;
41479 }
41480 rc = 0;
41481- if (unlikely(ecryptfs_verbosity > 0)) {
41482- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16llx]; "
41483- "rc = [%d]\n",
41484- (unsigned long long)(extent_base + extent_offset), rc);
41485- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
41486- "decryption:\n");
41487- ecryptfs_dump_hex((char *)(page_address(page)
41488- + (extent_offset
41489- * crypt_stat->extent_size)), 8);
41490- }
41491 out:
41492 return rc;
41493 }
41494diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
41495index 32f90a3..a766407 100644
41496--- a/fs/ecryptfs/inode.c
41497+++ b/fs/ecryptfs/inode.c
41498@@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
41499 old_fs = get_fs();
41500 set_fs(get_ds());
41501 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41502- (char __user *)lower_buf,
41503+ (char __force_user *)lower_buf,
41504 lower_bufsiz);
41505 set_fs(old_fs);
41506 if (rc < 0)
41507@@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41508 }
41509 old_fs = get_fs();
41510 set_fs(get_ds());
41511- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41512+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
41513 set_fs(old_fs);
41514 if (rc < 0) {
41515 kfree(buf);
41516@@ -752,7 +752,7 @@ out:
41517 static void
41518 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
41519 {
41520- char *buf = nd_get_link(nd);
41521+ const char *buf = nd_get_link(nd);
41522 if (!IS_ERR(buf)) {
41523 /* Free the char* */
41524 kfree(buf);
41525@@ -841,18 +841,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
41526 size_t num_zeros = (PAGE_CACHE_SIZE
41527 - (ia->ia_size & ~PAGE_CACHE_MASK));
41528
41529-
41530- /*
41531- * XXX(truncate) this should really happen at the begginning
41532- * of ->setattr. But the code is too messy to that as part
41533- * of a larger patch. ecryptfs is also totally missing out
41534- * on the inode_change_ok check at the beginning of
41535- * ->setattr while would include this.
41536- */
41537- rc = inode_newsize_ok(inode, ia->ia_size);
41538- if (rc)
41539- goto out;
41540-
41541 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
41542 truncate_setsize(inode, ia->ia_size);
41543 lower_ia->ia_size = ia->ia_size;
41544@@ -902,6 +890,28 @@ out:
41545 return rc;
41546 }
41547
41548+static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset)
41549+{
41550+ struct ecryptfs_crypt_stat *crypt_stat;
41551+ loff_t lower_oldsize, lower_newsize;
41552+
41553+ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
41554+ lower_oldsize = upper_size_to_lower_size(crypt_stat,
41555+ i_size_read(inode));
41556+ lower_newsize = upper_size_to_lower_size(crypt_stat, offset);
41557+ if (lower_newsize > lower_oldsize) {
41558+ /*
41559+ * The eCryptfs inode and the new *lower* size are mixed here
41560+ * because we may not have the lower i_mutex held and/or it may
41561+ * not be appropriate to call inode_newsize_ok() with inodes
41562+ * from other filesystems.
41563+ */
41564+ return inode_newsize_ok(inode, lower_newsize);
41565+ }
41566+
41567+ return 0;
41568+}
41569+
41570 /**
41571 * ecryptfs_truncate
41572 * @dentry: The ecryptfs layer dentry
41573@@ -918,6 +928,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
41574 struct iattr lower_ia = { .ia_valid = 0 };
41575 int rc;
41576
41577+ rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length);
41578+ if (rc)
41579+ return rc;
41580+
41581 rc = truncate_upper(dentry, &ia, &lower_ia);
41582 if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
41583 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
41584@@ -997,6 +1011,16 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
41585 }
41586 }
41587 mutex_unlock(&crypt_stat->cs_mutex);
41588+
41589+ rc = inode_change_ok(inode, ia);
41590+ if (rc)
41591+ goto out;
41592+ if (ia->ia_valid & ATTR_SIZE) {
41593+ rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size);
41594+ if (rc)
41595+ goto out;
41596+ }
41597+
41598 if (S_ISREG(inode->i_mode)) {
41599 rc = filemap_write_and_wait(inode->i_mapping);
41600 if (rc)
41601diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
41602index 940a82e..d3cdeea 100644
41603--- a/fs/ecryptfs/miscdev.c
41604+++ b/fs/ecryptfs/miscdev.c
41605@@ -328,7 +328,7 @@ check_list:
41606 goto out_unlock_msg_ctx;
41607 i = 5;
41608 if (msg_ctx->msg) {
41609- if (copy_to_user(&buf[i], packet_length, packet_length_size))
41610+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
41611 goto out_unlock_msg_ctx;
41612 i += packet_length_size;
41613 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
41614@@ -409,11 +409,47 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
41615 ssize_t sz = 0;
41616 char *data;
41617 uid_t euid = current_euid();
41618+ unsigned char packet_size_peek[3];
41619 int rc;
41620
41621- if (count == 0)
41622+ if (count == 0) {
41623 goto out;
41624+ } else if (count == (1 + 4)) {
41625+ /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
41626+ goto memdup;
41627+ } else if (count < (1 + 4 + 1)
41628+ || count > (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
41629+ + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)) {
41630+ printk(KERN_WARNING "%s: Acceptable packet size range is "
41631+ "[%d-%lu], but amount of data written is [%zu].",
41632+ __func__, (1 + 4 + 1),
41633+ (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
41634+ + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES), count);
41635+ return -EINVAL;
41636+ }
41637
41638+ if (copy_from_user(packet_size_peek, (buf + 1 + 4),
41639+ sizeof(packet_size_peek))) {
41640+ printk(KERN_WARNING "%s: Error while inspecting packet size\n",
41641+ __func__);
41642+ return -EFAULT;
41643+ }
41644+
41645+ rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
41646+ &packet_size_length);
41647+ if (rc) {
41648+ printk(KERN_WARNING "%s: Error parsing packet length; "
41649+ "rc = [%d]\n", __func__, rc);
41650+ return rc;
41651+ }
41652+
41653+ if ((1 + 4 + packet_size_length + packet_size) != count) {
41654+ printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
41655+ packet_size);
41656+ return -EINVAL;
41657+ }
41658+
41659+memdup:
41660 data = memdup_user(buf, count);
41661 if (IS_ERR(data)) {
41662 printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
41663@@ -435,23 +471,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
41664 }
41665 memcpy(&counter_nbo, &data[i], 4);
41666 seq = be32_to_cpu(counter_nbo);
41667- i += 4;
41668- rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
41669- &packet_size_length);
41670- if (rc) {
41671- printk(KERN_WARNING "%s: Error parsing packet length; "
41672- "rc = [%d]\n", __func__, rc);
41673- goto out_free;
41674- }
41675- i += packet_size_length;
41676- if ((1 + 4 + packet_size_length + packet_size) != count) {
41677- printk(KERN_WARNING "%s: (1 + packet_size_length([%zd])"
41678- " + packet_size([%zd]))([%zd]) != "
41679- "count([%zd]). Invalid packet format.\n",
41680- __func__, packet_size_length, packet_size,
41681- (1 + packet_size_length + packet_size), count);
41682- goto out_free;
41683- }
41684+ i += 4 + packet_size_length;
41685 rc = ecryptfs_miscdev_response(&data[i], packet_size,
41686 euid, current_user_ns(),
41687 task_pid(current), seq);
41688diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
41689index 3745f7c..7d040a8 100644
41690--- a/fs/ecryptfs/read_write.c
41691+++ b/fs/ecryptfs/read_write.c
41692@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
41693 return -EIO;
41694 fs_save = get_fs();
41695 set_fs(get_ds());
41696- rc = vfs_write(lower_file, data, size, &offset);
41697+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
41698 set_fs(fs_save);
41699 mark_inode_dirty_sync(ecryptfs_inode);
41700 return rc;
41701@@ -130,13 +130,18 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41702 pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
41703 size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
41704 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
41705- size_t total_remaining_bytes = ((offset + size) - pos);
41706+ loff_t total_remaining_bytes = ((offset + size) - pos);
41707+
41708+ if (fatal_signal_pending(current)) {
41709+ rc = -EINTR;
41710+ break;
41711+ }
41712
41713 if (num_bytes > total_remaining_bytes)
41714 num_bytes = total_remaining_bytes;
41715 if (pos < offset) {
41716 /* remaining zeros to write, up to destination offset */
41717- size_t total_remaining_zeros = (offset - pos);
41718+ loff_t total_remaining_zeros = (offset - pos);
41719
41720 if (num_bytes > total_remaining_zeros)
41721 num_bytes = total_remaining_zeros;
41722@@ -193,15 +198,19 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
41723 }
41724 pos += num_bytes;
41725 }
41726- if ((offset + size) > ecryptfs_file_size) {
41727- i_size_write(ecryptfs_inode, (offset + size));
41728+ if (pos > ecryptfs_file_size) {
41729+ i_size_write(ecryptfs_inode, pos);
41730 if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
41731- rc = ecryptfs_write_inode_size_to_metadata(
41732+ int rc2;
41733+
41734+ rc2 = ecryptfs_write_inode_size_to_metadata(
41735 ecryptfs_inode);
41736- if (rc) {
41737+ if (rc2) {
41738 printk(KERN_ERR "Problem with "
41739 "ecryptfs_write_inode_size_to_metadata; "
41740- "rc = [%d]\n", rc);
41741+ "rc = [%d]\n", rc2);
41742+ if (!rc)
41743+ rc = rc2;
41744 goto out;
41745 }
41746 }
41747@@ -235,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
41748 return -EIO;
41749 fs_save = get_fs();
41750 set_fs(get_ds());
41751- rc = vfs_read(lower_file, data, size, &offset);
41752+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
41753 set_fs(fs_save);
41754 return rc;
41755 }
41756diff --git a/fs/exec.c b/fs/exec.c
41757index 3625464..fac01f4 100644
41758--- a/fs/exec.c
41759+++ b/fs/exec.c
41760@@ -55,12 +55,28 @@
41761 #include <linux/pipe_fs_i.h>
41762 #include <linux/oom.h>
41763 #include <linux/compat.h>
41764+#include <linux/random.h>
41765+#include <linux/seq_file.h>
41766+
41767+#ifdef CONFIG_PAX_REFCOUNT
41768+#include <linux/kallsyms.h>
41769+#include <linux/kdebug.h>
41770+#endif
41771
41772 #include <asm/uaccess.h>
41773 #include <asm/mmu_context.h>
41774 #include <asm/tlb.h>
41775 #include "internal.h"
41776
41777+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
41778+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
41779+#endif
41780+
41781+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41782+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41783+EXPORT_SYMBOL(pax_set_initial_flags_func);
41784+#endif
41785+
41786 int core_uses_pid;
41787 char core_pattern[CORENAME_MAX_SIZE] = "core";
41788 unsigned int core_pipe_limit;
41789@@ -70,7 +86,7 @@ struct core_name {
41790 char *corename;
41791 int used, size;
41792 };
41793-static atomic_t call_count = ATOMIC_INIT(1);
41794+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
41795
41796 /* The maximal length of core_pattern is also specified in sysctl.c */
41797
41798@@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
41799 int write)
41800 {
41801 struct page *page;
41802- int ret;
41803
41804-#ifdef CONFIG_STACK_GROWSUP
41805- if (write) {
41806- ret = expand_downwards(bprm->vma, pos);
41807- if (ret < 0)
41808- return NULL;
41809- }
41810-#endif
41811- ret = get_user_pages(current, bprm->mm, pos,
41812- 1, write, 1, &page, NULL);
41813- if (ret <= 0)
41814+ if (0 > expand_downwards(bprm->vma, pos))
41815+ return NULL;
41816+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41817 return NULL;
41818
41819 if (write) {
41820@@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41821 vma->vm_end = STACK_TOP_MAX;
41822 vma->vm_start = vma->vm_end - PAGE_SIZE;
41823 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
41824+
41825+#ifdef CONFIG_PAX_SEGMEXEC
41826+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41827+#endif
41828+
41829 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41830 INIT_LIST_HEAD(&vma->anon_vma_chain);
41831
41832@@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
41833 mm->stack_vm = mm->total_vm = 1;
41834 up_write(&mm->mmap_sem);
41835 bprm->p = vma->vm_end - sizeof(void *);
41836+
41837+#ifdef CONFIG_PAX_RANDUSTACK
41838+ if (randomize_va_space)
41839+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41840+#endif
41841+
41842 return 0;
41843 err:
41844 up_write(&mm->mmap_sem);
41845@@ -396,19 +415,7 @@ err:
41846 return err;
41847 }
41848
41849-struct user_arg_ptr {
41850-#ifdef CONFIG_COMPAT
41851- bool is_compat;
41852-#endif
41853- union {
41854- const char __user *const __user *native;
41855-#ifdef CONFIG_COMPAT
41856- compat_uptr_t __user *compat;
41857-#endif
41858- } ptr;
41859-};
41860-
41861-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41862+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41863 {
41864 const char __user *native;
41865
41866@@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
41867 compat_uptr_t compat;
41868
41869 if (get_user(compat, argv.ptr.compat + nr))
41870- return ERR_PTR(-EFAULT);
41871+ return (const char __force_user *)ERR_PTR(-EFAULT);
41872
41873 return compat_ptr(compat);
41874 }
41875 #endif
41876
41877 if (get_user(native, argv.ptr.native + nr))
41878- return ERR_PTR(-EFAULT);
41879+ return (const char __force_user *)ERR_PTR(-EFAULT);
41880
41881 return native;
41882 }
41883@@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max)
41884 if (!p)
41885 break;
41886
41887- if (IS_ERR(p))
41888+ if (IS_ERR((const char __force_kernel *)p))
41889 return -EFAULT;
41890
41891 if (i++ >= max)
41892@@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
41893
41894 ret = -EFAULT;
41895 str = get_user_arg_ptr(argv, argc);
41896- if (IS_ERR(str))
41897+ if (IS_ERR((const char __force_kernel *)str))
41898 goto out;
41899
41900 len = strnlen_user(str, MAX_ARG_STRLEN);
41901@@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
41902 int r;
41903 mm_segment_t oldfs = get_fs();
41904 struct user_arg_ptr argv = {
41905- .ptr.native = (const char __user *const __user *)__argv,
41906+ .ptr.native = (const char __force_user *const __force_user *)__argv,
41907 };
41908
41909 set_fs(KERNEL_DS);
41910@@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41911 unsigned long new_end = old_end - shift;
41912 struct mmu_gather tlb;
41913
41914- BUG_ON(new_start > new_end);
41915+ if (new_start >= new_end || new_start < mmap_min_addr)
41916+ return -ENOMEM;
41917
41918 /*
41919 * ensure there are no vmas between where we want to go
41920@@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
41921 if (vma != find_vma(mm, new_start))
41922 return -EFAULT;
41923
41924+#ifdef CONFIG_PAX_SEGMEXEC
41925+ BUG_ON(pax_find_mirror_vma(vma));
41926+#endif
41927+
41928 /*
41929 * cover the whole range: [new_start, old_end)
41930 */
41931@@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41932 stack_top = arch_align_stack(stack_top);
41933 stack_top = PAGE_ALIGN(stack_top);
41934
41935- if (unlikely(stack_top < mmap_min_addr) ||
41936- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41937- return -ENOMEM;
41938-
41939 stack_shift = vma->vm_end - stack_top;
41940
41941 bprm->p -= stack_shift;
41942@@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
41943 bprm->exec -= stack_shift;
41944
41945 down_write(&mm->mmap_sem);
41946+
41947+ /* Move stack pages down in memory. */
41948+ if (stack_shift) {
41949+ ret = shift_arg_pages(vma, stack_shift);
41950+ if (ret)
41951+ goto out_unlock;
41952+ }
41953+
41954 vm_flags = VM_STACK_FLAGS;
41955
41956+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41957+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41958+ vm_flags &= ~VM_EXEC;
41959+
41960+#ifdef CONFIG_PAX_MPROTECT
41961+ if (mm->pax_flags & MF_PAX_MPROTECT)
41962+ vm_flags &= ~VM_MAYEXEC;
41963+#endif
41964+
41965+ }
41966+#endif
41967+
41968 /*
41969 * Adjust stack execute permissions; explicitly enable for
41970 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
41971@@ -718,13 +746,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
41972 goto out_unlock;
41973 BUG_ON(prev != vma);
41974
41975- /* Move stack pages down in memory. */
41976- if (stack_shift) {
41977- ret = shift_arg_pages(vma, stack_shift);
41978- if (ret)
41979- goto out_unlock;
41980- }
41981-
41982 /* mprotect_fixup is overkill to remove the temporary stack flags */
41983 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
41984
41985@@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset,
41986 old_fs = get_fs();
41987 set_fs(get_ds());
41988 /* The cast to a user pointer is valid due to the set_fs() */
41989- result = vfs_read(file, (void __user *)addr, count, &pos);
41990+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
41991 set_fs(old_fs);
41992 return result;
41993 }
41994@@ -1247,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
41995 }
41996 rcu_read_unlock();
41997
41998- if (p->fs->users > n_fs) {
41999+ if (atomic_read(&p->fs->users) > n_fs) {
42000 bprm->unsafe |= LSM_UNSAFE_SHARE;
42001 } else {
42002 res = -EAGAIN;
42003@@ -1450,6 +1471,11 @@ static int do_execve_common(const char *filename,
42004 struct user_arg_ptr envp,
42005 struct pt_regs *regs)
42006 {
42007+#ifdef CONFIG_GRKERNSEC
42008+ struct file *old_exec_file;
42009+ struct acl_subject_label *old_acl;
42010+ struct rlimit old_rlim[RLIM_NLIMITS];
42011+#endif
42012 struct linux_binprm *bprm;
42013 struct file *file;
42014 struct files_struct *displaced;
42015@@ -1457,6 +1483,8 @@ static int do_execve_common(const char *filename,
42016 int retval;
42017 const struct cred *cred = current_cred();
42018
42019+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
42020+
42021 /*
42022 * We move the actual failure in case of RLIMIT_NPROC excess from
42023 * set*uid() to execve() because too many poorly written programs
42024@@ -1497,12 +1525,27 @@ static int do_execve_common(const char *filename,
42025 if (IS_ERR(file))
42026 goto out_unmark;
42027
42028+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
42029+ retval = -EPERM;
42030+ goto out_file;
42031+ }
42032+
42033 sched_exec();
42034
42035 bprm->file = file;
42036 bprm->filename = filename;
42037 bprm->interp = filename;
42038
42039+ if (gr_process_user_ban()) {
42040+ retval = -EPERM;
42041+ goto out_file;
42042+ }
42043+
42044+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
42045+ retval = -EACCES;
42046+ goto out_file;
42047+ }
42048+
42049 retval = bprm_mm_init(bprm);
42050 if (retval)
42051 goto out_file;
42052@@ -1532,9 +1575,40 @@ static int do_execve_common(const char *filename,
42053 if (retval < 0)
42054 goto out;
42055
42056+ if (!gr_tpe_allow(file)) {
42057+ retval = -EACCES;
42058+ goto out;
42059+ }
42060+
42061+ if (gr_check_crash_exec(file)) {
42062+ retval = -EACCES;
42063+ goto out;
42064+ }
42065+
42066+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
42067+
42068+ gr_handle_exec_args(bprm, argv);
42069+
42070+#ifdef CONFIG_GRKERNSEC
42071+ old_acl = current->acl;
42072+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
42073+ old_exec_file = current->exec_file;
42074+ get_file(file);
42075+ current->exec_file = file;
42076+#endif
42077+
42078+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
42079+ bprm->unsafe);
42080+ if (retval < 0)
42081+ goto out_fail;
42082+
42083 retval = search_binary_handler(bprm,regs);
42084 if (retval < 0)
42085- goto out;
42086+ goto out_fail;
42087+#ifdef CONFIG_GRKERNSEC
42088+ if (old_exec_file)
42089+ fput(old_exec_file);
42090+#endif
42091
42092 /* execve succeeded */
42093 current->fs->in_exec = 0;
42094@@ -1545,6 +1619,14 @@ static int do_execve_common(const char *filename,
42095 put_files_struct(displaced);
42096 return retval;
42097
42098+out_fail:
42099+#ifdef CONFIG_GRKERNSEC
42100+ current->acl = old_acl;
42101+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
42102+ fput(current->exec_file);
42103+ current->exec_file = old_exec_file;
42104+#endif
42105+
42106 out:
42107 if (bprm->mm) {
42108 acct_arg_size(bprm, 0);
42109@@ -1618,7 +1700,7 @@ static int expand_corename(struct core_name *cn)
42110 {
42111 char *old_corename = cn->corename;
42112
42113- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
42114+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
42115 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
42116
42117 if (!cn->corename) {
42118@@ -1715,7 +1797,7 @@ static int format_corename(struct core_name *cn, long signr)
42119 int pid_in_pattern = 0;
42120 int err = 0;
42121
42122- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
42123+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
42124 cn->corename = kmalloc(cn->size, GFP_KERNEL);
42125 cn->used = 0;
42126
42127@@ -1812,6 +1894,218 @@ out:
42128 return ispipe;
42129 }
42130
42131+int pax_check_flags(unsigned long *flags)
42132+{
42133+ int retval = 0;
42134+
42135+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
42136+ if (*flags & MF_PAX_SEGMEXEC)
42137+ {
42138+ *flags &= ~MF_PAX_SEGMEXEC;
42139+ retval = -EINVAL;
42140+ }
42141+#endif
42142+
42143+ if ((*flags & MF_PAX_PAGEEXEC)
42144+
42145+#ifdef CONFIG_PAX_PAGEEXEC
42146+ && (*flags & MF_PAX_SEGMEXEC)
42147+#endif
42148+
42149+ )
42150+ {
42151+ *flags &= ~MF_PAX_PAGEEXEC;
42152+ retval = -EINVAL;
42153+ }
42154+
42155+ if ((*flags & MF_PAX_MPROTECT)
42156+
42157+#ifdef CONFIG_PAX_MPROTECT
42158+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42159+#endif
42160+
42161+ )
42162+ {
42163+ *flags &= ~MF_PAX_MPROTECT;
42164+ retval = -EINVAL;
42165+ }
42166+
42167+ if ((*flags & MF_PAX_EMUTRAMP)
42168+
42169+#ifdef CONFIG_PAX_EMUTRAMP
42170+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
42171+#endif
42172+
42173+ )
42174+ {
42175+ *flags &= ~MF_PAX_EMUTRAMP;
42176+ retval = -EINVAL;
42177+ }
42178+
42179+ return retval;
42180+}
42181+
42182+EXPORT_SYMBOL(pax_check_flags);
42183+
42184+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42185+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
42186+{
42187+ struct task_struct *tsk = current;
42188+ struct mm_struct *mm = current->mm;
42189+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
42190+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
42191+ char *path_exec = NULL;
42192+ char *path_fault = NULL;
42193+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
42194+
42195+ if (buffer_exec && buffer_fault) {
42196+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
42197+
42198+ down_read(&mm->mmap_sem);
42199+ vma = mm->mmap;
42200+ while (vma && (!vma_exec || !vma_fault)) {
42201+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
42202+ vma_exec = vma;
42203+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
42204+ vma_fault = vma;
42205+ vma = vma->vm_next;
42206+ }
42207+ if (vma_exec) {
42208+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
42209+ if (IS_ERR(path_exec))
42210+ path_exec = "<path too long>";
42211+ else {
42212+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
42213+ if (path_exec) {
42214+ *path_exec = 0;
42215+ path_exec = buffer_exec;
42216+ } else
42217+ path_exec = "<path too long>";
42218+ }
42219+ }
42220+ if (vma_fault) {
42221+ start = vma_fault->vm_start;
42222+ end = vma_fault->vm_end;
42223+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
42224+ if (vma_fault->vm_file) {
42225+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
42226+ if (IS_ERR(path_fault))
42227+ path_fault = "<path too long>";
42228+ else {
42229+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
42230+ if (path_fault) {
42231+ *path_fault = 0;
42232+ path_fault = buffer_fault;
42233+ } else
42234+ path_fault = "<path too long>";
42235+ }
42236+ } else
42237+ path_fault = "<anonymous mapping>";
42238+ }
42239+ up_read(&mm->mmap_sem);
42240+ }
42241+ if (tsk->signal->curr_ip)
42242+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
42243+ else
42244+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
42245+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
42246+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
42247+ task_uid(tsk), task_euid(tsk), pc, sp);
42248+ free_page((unsigned long)buffer_exec);
42249+ free_page((unsigned long)buffer_fault);
42250+ pax_report_insns(regs, pc, sp);
42251+ do_coredump(SIGKILL, SIGKILL, regs);
42252+}
42253+#endif
42254+
42255+#ifdef CONFIG_PAX_REFCOUNT
42256+void pax_report_refcount_overflow(struct pt_regs *regs)
42257+{
42258+ if (current->signal->curr_ip)
42259+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42260+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
42261+ else
42262+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
42263+ current->comm, task_pid_nr(current), current_uid(), current_euid());
42264+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
42265+ show_regs(regs);
42266+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
42267+}
42268+#endif
42269+
42270+#ifdef CONFIG_PAX_USERCOPY
42271+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
42272+int object_is_on_stack(const void *obj, unsigned long len)
42273+{
42274+ const void * const stack = task_stack_page(current);
42275+ const void * const stackend = stack + THREAD_SIZE;
42276+
42277+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42278+ const void *frame = NULL;
42279+ const void *oldframe;
42280+#endif
42281+
42282+ if (obj + len < obj)
42283+ return -1;
42284+
42285+ if (obj + len <= stack || stackend <= obj)
42286+ return 0;
42287+
42288+ if (obj < stack || stackend < obj + len)
42289+ return -1;
42290+
42291+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
42292+ oldframe = __builtin_frame_address(1);
42293+ if (oldframe)
42294+ frame = __builtin_frame_address(2);
42295+ /*
42296+ low ----------------------------------------------> high
42297+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
42298+ ^----------------^
42299+ allow copies only within here
42300+ */
42301+ while (stack <= frame && frame < stackend) {
42302+ /* if obj + len extends past the last frame, this
42303+ check won't pass and the next frame will be 0,
42304+ causing us to bail out and correctly report
42305+ the copy as invalid
42306+ */
42307+ if (obj + len <= frame)
42308+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
42309+ oldframe = frame;
42310+ frame = *(const void * const *)frame;
42311+ }
42312+ return -1;
42313+#else
42314+ return 1;
42315+#endif
42316+}
42317+
42318+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
42319+{
42320+ if (current->signal->curr_ip)
42321+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42322+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42323+ else
42324+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
42325+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
42326+ dump_stack();
42327+ gr_handle_kernel_exploit();
42328+ do_group_exit(SIGKILL);
42329+}
42330+#endif
42331+
42332+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
42333+void pax_track_stack(void)
42334+{
42335+ unsigned long sp = (unsigned long)&sp;
42336+ if (sp < current_thread_info()->lowest_stack &&
42337+ sp > (unsigned long)task_stack_page(current))
42338+ current_thread_info()->lowest_stack = sp;
42339+}
42340+EXPORT_SYMBOL(pax_track_stack);
42341+#endif
42342+
42343 static int zap_process(struct task_struct *start, int exit_code)
42344 {
42345 struct task_struct *t;
42346@@ -2023,17 +2317,17 @@ static void wait_for_dump_helpers(struct file *file)
42347 pipe = file->f_path.dentry->d_inode->i_pipe;
42348
42349 pipe_lock(pipe);
42350- pipe->readers++;
42351- pipe->writers--;
42352+ atomic_inc(&pipe->readers);
42353+ atomic_dec(&pipe->writers);
42354
42355- while ((pipe->readers > 1) && (!signal_pending(current))) {
42356+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
42357 wake_up_interruptible_sync(&pipe->wait);
42358 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42359 pipe_wait(pipe);
42360 }
42361
42362- pipe->readers--;
42363- pipe->writers++;
42364+ atomic_dec(&pipe->readers);
42365+ atomic_inc(&pipe->writers);
42366 pipe_unlock(pipe);
42367
42368 }
42369@@ -2094,7 +2388,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42370 int retval = 0;
42371 int flag = 0;
42372 int ispipe;
42373- static atomic_t core_dump_count = ATOMIC_INIT(0);
42374+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
42375 struct coredump_params cprm = {
42376 .signr = signr,
42377 .regs = regs,
42378@@ -2109,6 +2403,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42379
42380 audit_core_dumps(signr);
42381
42382+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
42383+ gr_handle_brute_attach(current, cprm.mm_flags);
42384+
42385 binfmt = mm->binfmt;
42386 if (!binfmt || !binfmt->core_dump)
42387 goto fail;
42388@@ -2176,7 +2473,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42389 }
42390 cprm.limit = RLIM_INFINITY;
42391
42392- dump_count = atomic_inc_return(&core_dump_count);
42393+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
42394 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
42395 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
42396 task_tgid_vnr(current), current->comm);
42397@@ -2203,6 +2500,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
42398 } else {
42399 struct inode *inode;
42400
42401+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
42402+
42403 if (cprm.limit < binfmt->min_coredump)
42404 goto fail_unlock;
42405
42406@@ -2246,7 +2545,7 @@ close_fail:
42407 filp_close(cprm.file, NULL);
42408 fail_dropcount:
42409 if (ispipe)
42410- atomic_dec(&core_dump_count);
42411+ atomic_dec_unchecked(&core_dump_count);
42412 fail_unlock:
42413 kfree(cn.corename);
42414 fail_corename:
42415@@ -2265,7 +2564,7 @@ fail:
42416 */
42417 int dump_write(struct file *file, const void *addr, int nr)
42418 {
42419- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
42420+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
42421 }
42422 EXPORT_SYMBOL(dump_write);
42423
42424diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
42425index a8cbe1b..fed04cb 100644
42426--- a/fs/ext2/balloc.c
42427+++ b/fs/ext2/balloc.c
42428@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
42429
42430 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42431 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42432- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42433+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
42434 sbi->s_resuid != current_fsuid() &&
42435 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42436 return 0;
42437diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
42438index a203892..4e64db5 100644
42439--- a/fs/ext3/balloc.c
42440+++ b/fs/ext3/balloc.c
42441@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
42442
42443 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
42444 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
42445- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
42446+ if (free_blocks < root_blocks + 1 &&
42447 !use_reservation && sbi->s_resuid != current_fsuid() &&
42448- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
42449+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
42450+ !capable_nolog(CAP_SYS_RESOURCE)) {
42451 return 0;
42452 }
42453 return 1;
42454diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
42455index 12ccacd..a6035fce0 100644
42456--- a/fs/ext4/balloc.c
42457+++ b/fs/ext4/balloc.c
42458@@ -436,8 +436,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
42459 /* Hm, nope. Are (enough) root reserved clusters available? */
42460 if (sbi->s_resuid == current_fsuid() ||
42461 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
42462- capable(CAP_SYS_RESOURCE) ||
42463- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
42464+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
42465+ capable_nolog(CAP_SYS_RESOURCE)) {
42466
42467 if (free_clusters >= (nclusters + dirty_clusters))
42468 return 1;
42469diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
42470index 5b0e26a..0aa002d 100644
42471--- a/fs/ext4/ext4.h
42472+++ b/fs/ext4/ext4.h
42473@@ -1208,19 +1208,19 @@ struct ext4_sb_info {
42474 unsigned long s_mb_last_start;
42475
42476 /* stats for buddy allocator */
42477- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
42478- atomic_t s_bal_success; /* we found long enough chunks */
42479- atomic_t s_bal_allocated; /* in blocks */
42480- atomic_t s_bal_ex_scanned; /* total extents scanned */
42481- atomic_t s_bal_goals; /* goal hits */
42482- atomic_t s_bal_breaks; /* too long searches */
42483- atomic_t s_bal_2orders; /* 2^order hits */
42484+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
42485+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
42486+ atomic_unchecked_t s_bal_allocated; /* in blocks */
42487+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
42488+ atomic_unchecked_t s_bal_goals; /* goal hits */
42489+ atomic_unchecked_t s_bal_breaks; /* too long searches */
42490+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
42491 spinlock_t s_bal_lock;
42492 unsigned long s_mb_buddies_generated;
42493 unsigned long long s_mb_generation_time;
42494- atomic_t s_mb_lost_chunks;
42495- atomic_t s_mb_preallocated;
42496- atomic_t s_mb_discarded;
42497+ atomic_unchecked_t s_mb_lost_chunks;
42498+ atomic_unchecked_t s_mb_preallocated;
42499+ atomic_unchecked_t s_mb_discarded;
42500 atomic_t s_lock_busy;
42501
42502 /* locality groups */
42503diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
42504index e2d8be8..c7f0ce9 100644
42505--- a/fs/ext4/mballoc.c
42506+++ b/fs/ext4/mballoc.c
42507@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
42508 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
42509
42510 if (EXT4_SB(sb)->s_mb_stats)
42511- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
42512+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
42513
42514 break;
42515 }
42516@@ -2088,7 +2088,7 @@ repeat:
42517 ac->ac_status = AC_STATUS_CONTINUE;
42518 ac->ac_flags |= EXT4_MB_HINT_FIRST;
42519 cr = 3;
42520- atomic_inc(&sbi->s_mb_lost_chunks);
42521+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
42522 goto repeat;
42523 }
42524 }
42525@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
42526 if (sbi->s_mb_stats) {
42527 ext4_msg(sb, KERN_INFO,
42528 "mballoc: %u blocks %u reqs (%u success)",
42529- atomic_read(&sbi->s_bal_allocated),
42530- atomic_read(&sbi->s_bal_reqs),
42531- atomic_read(&sbi->s_bal_success));
42532+ atomic_read_unchecked(&sbi->s_bal_allocated),
42533+ atomic_read_unchecked(&sbi->s_bal_reqs),
42534+ atomic_read_unchecked(&sbi->s_bal_success));
42535 ext4_msg(sb, KERN_INFO,
42536 "mballoc: %u extents scanned, %u goal hits, "
42537 "%u 2^N hits, %u breaks, %u lost",
42538- atomic_read(&sbi->s_bal_ex_scanned),
42539- atomic_read(&sbi->s_bal_goals),
42540- atomic_read(&sbi->s_bal_2orders),
42541- atomic_read(&sbi->s_bal_breaks),
42542- atomic_read(&sbi->s_mb_lost_chunks));
42543+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
42544+ atomic_read_unchecked(&sbi->s_bal_goals),
42545+ atomic_read_unchecked(&sbi->s_bal_2orders),
42546+ atomic_read_unchecked(&sbi->s_bal_breaks),
42547+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
42548 ext4_msg(sb, KERN_INFO,
42549 "mballoc: %lu generated and it took %Lu",
42550 sbi->s_mb_buddies_generated,
42551 sbi->s_mb_generation_time);
42552 ext4_msg(sb, KERN_INFO,
42553 "mballoc: %u preallocated, %u discarded",
42554- atomic_read(&sbi->s_mb_preallocated),
42555- atomic_read(&sbi->s_mb_discarded));
42556+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42557+ atomic_read_unchecked(&sbi->s_mb_discarded));
42558 }
42559
42560 free_percpu(sbi->s_locality_groups);
42561@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
42562 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42563
42564 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42565- atomic_inc(&sbi->s_bal_reqs);
42566- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42567+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42568+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42569 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
42570- atomic_inc(&sbi->s_bal_success);
42571- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42572+ atomic_inc_unchecked(&sbi->s_bal_success);
42573+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42574 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42575 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42576- atomic_inc(&sbi->s_bal_goals);
42577+ atomic_inc_unchecked(&sbi->s_bal_goals);
42578 if (ac->ac_found > sbi->s_mb_max_to_scan)
42579- atomic_inc(&sbi->s_bal_breaks);
42580+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42581 }
42582
42583 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42584@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
42585 trace_ext4_mb_new_inode_pa(ac, pa);
42586
42587 ext4_mb_use_inode_pa(ac, pa);
42588- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
42589+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
42590
42591 ei = EXT4_I(ac->ac_inode);
42592 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42593@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
42594 trace_ext4_mb_new_group_pa(ac, pa);
42595
42596 ext4_mb_use_group_pa(ac, pa);
42597- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42598+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42599
42600 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42601 lg = ac->ac_lg;
42602@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
42603 * from the bitmap and continue.
42604 */
42605 }
42606- atomic_add(free, &sbi->s_mb_discarded);
42607+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42608
42609 return err;
42610 }
42611@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
42612 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42613 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42614 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42615- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42616+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42617 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
42618
42619 return 0;
42620diff --git a/fs/fcntl.c b/fs/fcntl.c
42621index 22764c7..86372c9 100644
42622--- a/fs/fcntl.c
42623+++ b/fs/fcntl.c
42624@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
42625 if (err)
42626 return err;
42627
42628+ if (gr_handle_chroot_fowner(pid, type))
42629+ return -ENOENT;
42630+ if (gr_check_protected_task_fowner(pid, type))
42631+ return -EACCES;
42632+
42633 f_modown(filp, pid, type, force);
42634 return 0;
42635 }
42636@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
42637
42638 static int f_setown_ex(struct file *filp, unsigned long arg)
42639 {
42640- struct f_owner_ex * __user owner_p = (void * __user)arg;
42641+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42642 struct f_owner_ex owner;
42643 struct pid *pid;
42644 int type;
42645@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
42646
42647 static int f_getown_ex(struct file *filp, unsigned long arg)
42648 {
42649- struct f_owner_ex * __user owner_p = (void * __user)arg;
42650+ struct f_owner_ex __user *owner_p = (void __user *)arg;
42651 struct f_owner_ex owner;
42652 int ret = 0;
42653
42654@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
42655 switch (cmd) {
42656 case F_DUPFD:
42657 case F_DUPFD_CLOEXEC:
42658+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42659 if (arg >= rlimit(RLIMIT_NOFILE))
42660 break;
42661 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42662diff --git a/fs/fifo.c b/fs/fifo.c
42663index b1a524d..4ee270e 100644
42664--- a/fs/fifo.c
42665+++ b/fs/fifo.c
42666@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
42667 */
42668 filp->f_op = &read_pipefifo_fops;
42669 pipe->r_counter++;
42670- if (pipe->readers++ == 0)
42671+ if (atomic_inc_return(&pipe->readers) == 1)
42672 wake_up_partner(inode);
42673
42674- if (!pipe->writers) {
42675+ if (!atomic_read(&pipe->writers)) {
42676 if ((filp->f_flags & O_NONBLOCK)) {
42677 /* suppress POLLHUP until we have
42678 * seen a writer */
42679@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
42680 * errno=ENXIO when there is no process reading the FIFO.
42681 */
42682 ret = -ENXIO;
42683- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42684+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42685 goto err;
42686
42687 filp->f_op = &write_pipefifo_fops;
42688 pipe->w_counter++;
42689- if (!pipe->writers++)
42690+ if (atomic_inc_return(&pipe->writers) == 1)
42691 wake_up_partner(inode);
42692
42693- if (!pipe->readers) {
42694+ if (!atomic_read(&pipe->readers)) {
42695 wait_for_partner(inode, &pipe->r_counter);
42696 if (signal_pending(current))
42697 goto err_wr;
42698@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
42699 */
42700 filp->f_op = &rdwr_pipefifo_fops;
42701
42702- pipe->readers++;
42703- pipe->writers++;
42704+ atomic_inc(&pipe->readers);
42705+ atomic_inc(&pipe->writers);
42706 pipe->r_counter++;
42707 pipe->w_counter++;
42708- if (pipe->readers == 1 || pipe->writers == 1)
42709+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42710 wake_up_partner(inode);
42711 break;
42712
42713@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
42714 return 0;
42715
42716 err_rd:
42717- if (!--pipe->readers)
42718+ if (atomic_dec_and_test(&pipe->readers))
42719 wake_up_interruptible(&pipe->wait);
42720 ret = -ERESTARTSYS;
42721 goto err;
42722
42723 err_wr:
42724- if (!--pipe->writers)
42725+ if (atomic_dec_and_test(&pipe->writers))
42726 wake_up_interruptible(&pipe->wait);
42727 ret = -ERESTARTSYS;
42728 goto err;
42729
42730 err:
42731- if (!pipe->readers && !pipe->writers)
42732+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42733 free_pipe_info(inode);
42734
42735 err_nocleanup:
42736diff --git a/fs/file.c b/fs/file.c
42737index 4c6992d..104cdea 100644
42738--- a/fs/file.c
42739+++ b/fs/file.c
42740@@ -15,6 +15,7 @@
42741 #include <linux/slab.h>
42742 #include <linux/vmalloc.h>
42743 #include <linux/file.h>
42744+#include <linux/security.h>
42745 #include <linux/fdtable.h>
42746 #include <linux/bitops.h>
42747 #include <linux/interrupt.h>
42748@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
42749 * N.B. For clone tasks sharing a files structure, this test
42750 * will limit the total number of files that can be opened.
42751 */
42752+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42753 if (nr >= rlimit(RLIMIT_NOFILE))
42754 return -EMFILE;
42755
42756diff --git a/fs/filesystems.c b/fs/filesystems.c
42757index 0845f84..7b4ebef 100644
42758--- a/fs/filesystems.c
42759+++ b/fs/filesystems.c
42760@@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
42761 int len = dot ? dot - name : strlen(name);
42762
42763 fs = __get_fs_type(name, len);
42764+
42765+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42766+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42767+#else
42768 if (!fs && (request_module("%.*s", len, name) == 0))
42769+#endif
42770 fs = __get_fs_type(name, len);
42771
42772 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42773diff --git a/fs/fs_struct.c b/fs/fs_struct.c
42774index 78b519c..212c0d0 100644
42775--- a/fs/fs_struct.c
42776+++ b/fs/fs_struct.c
42777@@ -4,6 +4,7 @@
42778 #include <linux/path.h>
42779 #include <linux/slab.h>
42780 #include <linux/fs_struct.h>
42781+#include <linux/grsecurity.h>
42782 #include "internal.h"
42783
42784 static inline void path_get_longterm(struct path *path)
42785@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
42786 old_root = fs->root;
42787 fs->root = *path;
42788 path_get_longterm(path);
42789+ gr_set_chroot_entries(current, path);
42790 write_seqcount_end(&fs->seq);
42791 spin_unlock(&fs->lock);
42792 if (old_root.dentry)
42793@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
42794 && fs->root.mnt == old_root->mnt) {
42795 path_get_longterm(new_root);
42796 fs->root = *new_root;
42797+ gr_set_chroot_entries(p, new_root);
42798 count++;
42799 }
42800 if (fs->pwd.dentry == old_root->dentry
42801@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
42802 spin_lock(&fs->lock);
42803 write_seqcount_begin(&fs->seq);
42804 tsk->fs = NULL;
42805- kill = !--fs->users;
42806+ gr_clear_chroot_entries(tsk);
42807+ kill = !atomic_dec_return(&fs->users);
42808 write_seqcount_end(&fs->seq);
42809 spin_unlock(&fs->lock);
42810 task_unlock(tsk);
42811@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42812 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
42813 /* We don't need to lock fs - think why ;-) */
42814 if (fs) {
42815- fs->users = 1;
42816+ atomic_set(&fs->users, 1);
42817 fs->in_exec = 0;
42818 spin_lock_init(&fs->lock);
42819 seqcount_init(&fs->seq);
42820@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
42821 spin_lock(&old->lock);
42822 fs->root = old->root;
42823 path_get_longterm(&fs->root);
42824+ /* instead of calling gr_set_chroot_entries here,
42825+ we call it from every caller of this function
42826+ */
42827 fs->pwd = old->pwd;
42828 path_get_longterm(&fs->pwd);
42829 spin_unlock(&old->lock);
42830@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
42831
42832 task_lock(current);
42833 spin_lock(&fs->lock);
42834- kill = !--fs->users;
42835+ kill = !atomic_dec_return(&fs->users);
42836 current->fs = new_fs;
42837+ gr_set_chroot_entries(current, &new_fs->root);
42838 spin_unlock(&fs->lock);
42839 task_unlock(current);
42840
42841@@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
42842
42843 /* to be mentioned only in INIT_TASK */
42844 struct fs_struct init_fs = {
42845- .users = 1,
42846+ .users = ATOMIC_INIT(1),
42847 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
42848 .seq = SEQCNT_ZERO,
42849 .umask = 0022,
42850@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
42851 task_lock(current);
42852
42853 spin_lock(&init_fs.lock);
42854- init_fs.users++;
42855+ atomic_inc(&init_fs.users);
42856 spin_unlock(&init_fs.lock);
42857
42858 spin_lock(&fs->lock);
42859 current->fs = &init_fs;
42860- kill = !--fs->users;
42861+ gr_set_chroot_entries(current, &current->fs->root);
42862+ kill = !atomic_dec_return(&fs->users);
42863 spin_unlock(&fs->lock);
42864
42865 task_unlock(current);
42866diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
42867index 9905350..02eaec4 100644
42868--- a/fs/fscache/cookie.c
42869+++ b/fs/fscache/cookie.c
42870@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
42871 parent ? (char *) parent->def->name : "<no-parent>",
42872 def->name, netfs_data);
42873
42874- fscache_stat(&fscache_n_acquires);
42875+ fscache_stat_unchecked(&fscache_n_acquires);
42876
42877 /* if there's no parent cookie, then we don't create one here either */
42878 if (!parent) {
42879- fscache_stat(&fscache_n_acquires_null);
42880+ fscache_stat_unchecked(&fscache_n_acquires_null);
42881 _leave(" [no parent]");
42882 return NULL;
42883 }
42884@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
42885 /* allocate and initialise a cookie */
42886 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42887 if (!cookie) {
42888- fscache_stat(&fscache_n_acquires_oom);
42889+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42890 _leave(" [ENOMEM]");
42891 return NULL;
42892 }
42893@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42894
42895 switch (cookie->def->type) {
42896 case FSCACHE_COOKIE_TYPE_INDEX:
42897- fscache_stat(&fscache_n_cookie_index);
42898+ fscache_stat_unchecked(&fscache_n_cookie_index);
42899 break;
42900 case FSCACHE_COOKIE_TYPE_DATAFILE:
42901- fscache_stat(&fscache_n_cookie_data);
42902+ fscache_stat_unchecked(&fscache_n_cookie_data);
42903 break;
42904 default:
42905- fscache_stat(&fscache_n_cookie_special);
42906+ fscache_stat_unchecked(&fscache_n_cookie_special);
42907 break;
42908 }
42909
42910@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
42911 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42912 atomic_dec(&parent->n_children);
42913 __fscache_cookie_put(cookie);
42914- fscache_stat(&fscache_n_acquires_nobufs);
42915+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42916 _leave(" = NULL");
42917 return NULL;
42918 }
42919 }
42920
42921- fscache_stat(&fscache_n_acquires_ok);
42922+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42923 _leave(" = %p", cookie);
42924 return cookie;
42925 }
42926@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
42927 cache = fscache_select_cache_for_object(cookie->parent);
42928 if (!cache) {
42929 up_read(&fscache_addremove_sem);
42930- fscache_stat(&fscache_n_acquires_no_cache);
42931+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42932 _leave(" = -ENOMEDIUM [no cache]");
42933 return -ENOMEDIUM;
42934 }
42935@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
42936 object = cache->ops->alloc_object(cache, cookie);
42937 fscache_stat_d(&fscache_n_cop_alloc_object);
42938 if (IS_ERR(object)) {
42939- fscache_stat(&fscache_n_object_no_alloc);
42940+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42941 ret = PTR_ERR(object);
42942 goto error;
42943 }
42944
42945- fscache_stat(&fscache_n_object_alloc);
42946+ fscache_stat_unchecked(&fscache_n_object_alloc);
42947
42948 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42949
42950@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
42951 struct fscache_object *object;
42952 struct hlist_node *_p;
42953
42954- fscache_stat(&fscache_n_updates);
42955+ fscache_stat_unchecked(&fscache_n_updates);
42956
42957 if (!cookie) {
42958- fscache_stat(&fscache_n_updates_null);
42959+ fscache_stat_unchecked(&fscache_n_updates_null);
42960 _leave(" [no cookie]");
42961 return;
42962 }
42963@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42964 struct fscache_object *object;
42965 unsigned long event;
42966
42967- fscache_stat(&fscache_n_relinquishes);
42968+ fscache_stat_unchecked(&fscache_n_relinquishes);
42969 if (retire)
42970- fscache_stat(&fscache_n_relinquishes_retire);
42971+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42972
42973 if (!cookie) {
42974- fscache_stat(&fscache_n_relinquishes_null);
42975+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42976 _leave(" [no cookie]");
42977 return;
42978 }
42979@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
42980
42981 /* wait for the cookie to finish being instantiated (or to fail) */
42982 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42983- fscache_stat(&fscache_n_relinquishes_waitcrt);
42984+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42985 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42986 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42987 }
42988diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
42989index f6aad48..88dcf26 100644
42990--- a/fs/fscache/internal.h
42991+++ b/fs/fscache/internal.h
42992@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
42993 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42994 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42995
42996-extern atomic_t fscache_n_op_pend;
42997-extern atomic_t fscache_n_op_run;
42998-extern atomic_t fscache_n_op_enqueue;
42999-extern atomic_t fscache_n_op_deferred_release;
43000-extern atomic_t fscache_n_op_release;
43001-extern atomic_t fscache_n_op_gc;
43002-extern atomic_t fscache_n_op_cancelled;
43003-extern atomic_t fscache_n_op_rejected;
43004+extern atomic_unchecked_t fscache_n_op_pend;
43005+extern atomic_unchecked_t fscache_n_op_run;
43006+extern atomic_unchecked_t fscache_n_op_enqueue;
43007+extern atomic_unchecked_t fscache_n_op_deferred_release;
43008+extern atomic_unchecked_t fscache_n_op_release;
43009+extern atomic_unchecked_t fscache_n_op_gc;
43010+extern atomic_unchecked_t fscache_n_op_cancelled;
43011+extern atomic_unchecked_t fscache_n_op_rejected;
43012
43013-extern atomic_t fscache_n_attr_changed;
43014-extern atomic_t fscache_n_attr_changed_ok;
43015-extern atomic_t fscache_n_attr_changed_nobufs;
43016-extern atomic_t fscache_n_attr_changed_nomem;
43017-extern atomic_t fscache_n_attr_changed_calls;
43018+extern atomic_unchecked_t fscache_n_attr_changed;
43019+extern atomic_unchecked_t fscache_n_attr_changed_ok;
43020+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
43021+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
43022+extern atomic_unchecked_t fscache_n_attr_changed_calls;
43023
43024-extern atomic_t fscache_n_allocs;
43025-extern atomic_t fscache_n_allocs_ok;
43026-extern atomic_t fscache_n_allocs_wait;
43027-extern atomic_t fscache_n_allocs_nobufs;
43028-extern atomic_t fscache_n_allocs_intr;
43029-extern atomic_t fscache_n_allocs_object_dead;
43030-extern atomic_t fscache_n_alloc_ops;
43031-extern atomic_t fscache_n_alloc_op_waits;
43032+extern atomic_unchecked_t fscache_n_allocs;
43033+extern atomic_unchecked_t fscache_n_allocs_ok;
43034+extern atomic_unchecked_t fscache_n_allocs_wait;
43035+extern atomic_unchecked_t fscache_n_allocs_nobufs;
43036+extern atomic_unchecked_t fscache_n_allocs_intr;
43037+extern atomic_unchecked_t fscache_n_allocs_object_dead;
43038+extern atomic_unchecked_t fscache_n_alloc_ops;
43039+extern atomic_unchecked_t fscache_n_alloc_op_waits;
43040
43041-extern atomic_t fscache_n_retrievals;
43042-extern atomic_t fscache_n_retrievals_ok;
43043-extern atomic_t fscache_n_retrievals_wait;
43044-extern atomic_t fscache_n_retrievals_nodata;
43045-extern atomic_t fscache_n_retrievals_nobufs;
43046-extern atomic_t fscache_n_retrievals_intr;
43047-extern atomic_t fscache_n_retrievals_nomem;
43048-extern atomic_t fscache_n_retrievals_object_dead;
43049-extern atomic_t fscache_n_retrieval_ops;
43050-extern atomic_t fscache_n_retrieval_op_waits;
43051+extern atomic_unchecked_t fscache_n_retrievals;
43052+extern atomic_unchecked_t fscache_n_retrievals_ok;
43053+extern atomic_unchecked_t fscache_n_retrievals_wait;
43054+extern atomic_unchecked_t fscache_n_retrievals_nodata;
43055+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
43056+extern atomic_unchecked_t fscache_n_retrievals_intr;
43057+extern atomic_unchecked_t fscache_n_retrievals_nomem;
43058+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
43059+extern atomic_unchecked_t fscache_n_retrieval_ops;
43060+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
43061
43062-extern atomic_t fscache_n_stores;
43063-extern atomic_t fscache_n_stores_ok;
43064-extern atomic_t fscache_n_stores_again;
43065-extern atomic_t fscache_n_stores_nobufs;
43066-extern atomic_t fscache_n_stores_oom;
43067-extern atomic_t fscache_n_store_ops;
43068-extern atomic_t fscache_n_store_calls;
43069-extern atomic_t fscache_n_store_pages;
43070-extern atomic_t fscache_n_store_radix_deletes;
43071-extern atomic_t fscache_n_store_pages_over_limit;
43072+extern atomic_unchecked_t fscache_n_stores;
43073+extern atomic_unchecked_t fscache_n_stores_ok;
43074+extern atomic_unchecked_t fscache_n_stores_again;
43075+extern atomic_unchecked_t fscache_n_stores_nobufs;
43076+extern atomic_unchecked_t fscache_n_stores_oom;
43077+extern atomic_unchecked_t fscache_n_store_ops;
43078+extern atomic_unchecked_t fscache_n_store_calls;
43079+extern atomic_unchecked_t fscache_n_store_pages;
43080+extern atomic_unchecked_t fscache_n_store_radix_deletes;
43081+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
43082
43083-extern atomic_t fscache_n_store_vmscan_not_storing;
43084-extern atomic_t fscache_n_store_vmscan_gone;
43085-extern atomic_t fscache_n_store_vmscan_busy;
43086-extern atomic_t fscache_n_store_vmscan_cancelled;
43087+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43088+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
43089+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
43090+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43091
43092-extern atomic_t fscache_n_marks;
43093-extern atomic_t fscache_n_uncaches;
43094+extern atomic_unchecked_t fscache_n_marks;
43095+extern atomic_unchecked_t fscache_n_uncaches;
43096
43097-extern atomic_t fscache_n_acquires;
43098-extern atomic_t fscache_n_acquires_null;
43099-extern atomic_t fscache_n_acquires_no_cache;
43100-extern atomic_t fscache_n_acquires_ok;
43101-extern atomic_t fscache_n_acquires_nobufs;
43102-extern atomic_t fscache_n_acquires_oom;
43103+extern atomic_unchecked_t fscache_n_acquires;
43104+extern atomic_unchecked_t fscache_n_acquires_null;
43105+extern atomic_unchecked_t fscache_n_acquires_no_cache;
43106+extern atomic_unchecked_t fscache_n_acquires_ok;
43107+extern atomic_unchecked_t fscache_n_acquires_nobufs;
43108+extern atomic_unchecked_t fscache_n_acquires_oom;
43109
43110-extern atomic_t fscache_n_updates;
43111-extern atomic_t fscache_n_updates_null;
43112-extern atomic_t fscache_n_updates_run;
43113+extern atomic_unchecked_t fscache_n_updates;
43114+extern atomic_unchecked_t fscache_n_updates_null;
43115+extern atomic_unchecked_t fscache_n_updates_run;
43116
43117-extern atomic_t fscache_n_relinquishes;
43118-extern atomic_t fscache_n_relinquishes_null;
43119-extern atomic_t fscache_n_relinquishes_waitcrt;
43120-extern atomic_t fscache_n_relinquishes_retire;
43121+extern atomic_unchecked_t fscache_n_relinquishes;
43122+extern atomic_unchecked_t fscache_n_relinquishes_null;
43123+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43124+extern atomic_unchecked_t fscache_n_relinquishes_retire;
43125
43126-extern atomic_t fscache_n_cookie_index;
43127-extern atomic_t fscache_n_cookie_data;
43128-extern atomic_t fscache_n_cookie_special;
43129+extern atomic_unchecked_t fscache_n_cookie_index;
43130+extern atomic_unchecked_t fscache_n_cookie_data;
43131+extern atomic_unchecked_t fscache_n_cookie_special;
43132
43133-extern atomic_t fscache_n_object_alloc;
43134-extern atomic_t fscache_n_object_no_alloc;
43135-extern atomic_t fscache_n_object_lookups;
43136-extern atomic_t fscache_n_object_lookups_negative;
43137-extern atomic_t fscache_n_object_lookups_positive;
43138-extern atomic_t fscache_n_object_lookups_timed_out;
43139-extern atomic_t fscache_n_object_created;
43140-extern atomic_t fscache_n_object_avail;
43141-extern atomic_t fscache_n_object_dead;
43142+extern atomic_unchecked_t fscache_n_object_alloc;
43143+extern atomic_unchecked_t fscache_n_object_no_alloc;
43144+extern atomic_unchecked_t fscache_n_object_lookups;
43145+extern atomic_unchecked_t fscache_n_object_lookups_negative;
43146+extern atomic_unchecked_t fscache_n_object_lookups_positive;
43147+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
43148+extern atomic_unchecked_t fscache_n_object_created;
43149+extern atomic_unchecked_t fscache_n_object_avail;
43150+extern atomic_unchecked_t fscache_n_object_dead;
43151
43152-extern atomic_t fscache_n_checkaux_none;
43153-extern atomic_t fscache_n_checkaux_okay;
43154-extern atomic_t fscache_n_checkaux_update;
43155-extern atomic_t fscache_n_checkaux_obsolete;
43156+extern atomic_unchecked_t fscache_n_checkaux_none;
43157+extern atomic_unchecked_t fscache_n_checkaux_okay;
43158+extern atomic_unchecked_t fscache_n_checkaux_update;
43159+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
43160
43161 extern atomic_t fscache_n_cop_alloc_object;
43162 extern atomic_t fscache_n_cop_lookup_object;
43163@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
43164 atomic_inc(stat);
43165 }
43166
43167+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
43168+{
43169+ atomic_inc_unchecked(stat);
43170+}
43171+
43172 static inline void fscache_stat_d(atomic_t *stat)
43173 {
43174 atomic_dec(stat);
43175@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
43176
43177 #define __fscache_stat(stat) (NULL)
43178 #define fscache_stat(stat) do {} while (0)
43179+#define fscache_stat_unchecked(stat) do {} while (0)
43180 #define fscache_stat_d(stat) do {} while (0)
43181 #endif
43182
43183diff --git a/fs/fscache/object.c b/fs/fscache/object.c
43184index b6b897c..0ffff9c 100644
43185--- a/fs/fscache/object.c
43186+++ b/fs/fscache/object.c
43187@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43188 /* update the object metadata on disk */
43189 case FSCACHE_OBJECT_UPDATING:
43190 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
43191- fscache_stat(&fscache_n_updates_run);
43192+ fscache_stat_unchecked(&fscache_n_updates_run);
43193 fscache_stat(&fscache_n_cop_update_object);
43194 object->cache->ops->update_object(object);
43195 fscache_stat_d(&fscache_n_cop_update_object);
43196@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43197 spin_lock(&object->lock);
43198 object->state = FSCACHE_OBJECT_DEAD;
43199 spin_unlock(&object->lock);
43200- fscache_stat(&fscache_n_object_dead);
43201+ fscache_stat_unchecked(&fscache_n_object_dead);
43202 goto terminal_transit;
43203
43204 /* handle the parent cache of this object being withdrawn from
43205@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
43206 spin_lock(&object->lock);
43207 object->state = FSCACHE_OBJECT_DEAD;
43208 spin_unlock(&object->lock);
43209- fscache_stat(&fscache_n_object_dead);
43210+ fscache_stat_unchecked(&fscache_n_object_dead);
43211 goto terminal_transit;
43212
43213 /* complain about the object being woken up once it is
43214@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43215 parent->cookie->def->name, cookie->def->name,
43216 object->cache->tag->name);
43217
43218- fscache_stat(&fscache_n_object_lookups);
43219+ fscache_stat_unchecked(&fscache_n_object_lookups);
43220 fscache_stat(&fscache_n_cop_lookup_object);
43221 ret = object->cache->ops->lookup_object(object);
43222 fscache_stat_d(&fscache_n_cop_lookup_object);
43223@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
43224 if (ret == -ETIMEDOUT) {
43225 /* probably stuck behind another object, so move this one to
43226 * the back of the queue */
43227- fscache_stat(&fscache_n_object_lookups_timed_out);
43228+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
43229 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43230 }
43231
43232@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
43233
43234 spin_lock(&object->lock);
43235 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43236- fscache_stat(&fscache_n_object_lookups_negative);
43237+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
43238
43239 /* transit here to allow write requests to begin stacking up
43240 * and read requests to begin returning ENODATA */
43241@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
43242 * result, in which case there may be data available */
43243 spin_lock(&object->lock);
43244 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
43245- fscache_stat(&fscache_n_object_lookups_positive);
43246+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
43247
43248 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
43249
43250@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
43251 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
43252 } else {
43253 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
43254- fscache_stat(&fscache_n_object_created);
43255+ fscache_stat_unchecked(&fscache_n_object_created);
43256
43257 object->state = FSCACHE_OBJECT_AVAILABLE;
43258 spin_unlock(&object->lock);
43259@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
43260 fscache_enqueue_dependents(object);
43261
43262 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
43263- fscache_stat(&fscache_n_object_avail);
43264+ fscache_stat_unchecked(&fscache_n_object_avail);
43265
43266 _leave("");
43267 }
43268@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43269 enum fscache_checkaux result;
43270
43271 if (!object->cookie->def->check_aux) {
43272- fscache_stat(&fscache_n_checkaux_none);
43273+ fscache_stat_unchecked(&fscache_n_checkaux_none);
43274 return FSCACHE_CHECKAUX_OKAY;
43275 }
43276
43277@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
43278 switch (result) {
43279 /* entry okay as is */
43280 case FSCACHE_CHECKAUX_OKAY:
43281- fscache_stat(&fscache_n_checkaux_okay);
43282+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
43283 break;
43284
43285 /* entry requires update */
43286 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
43287- fscache_stat(&fscache_n_checkaux_update);
43288+ fscache_stat_unchecked(&fscache_n_checkaux_update);
43289 break;
43290
43291 /* entry requires deletion */
43292 case FSCACHE_CHECKAUX_OBSOLETE:
43293- fscache_stat(&fscache_n_checkaux_obsolete);
43294+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
43295 break;
43296
43297 default:
43298diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
43299index 30afdfa..2256596 100644
43300--- a/fs/fscache/operation.c
43301+++ b/fs/fscache/operation.c
43302@@ -17,7 +17,7 @@
43303 #include <linux/slab.h>
43304 #include "internal.h"
43305
43306-atomic_t fscache_op_debug_id;
43307+atomic_unchecked_t fscache_op_debug_id;
43308 EXPORT_SYMBOL(fscache_op_debug_id);
43309
43310 /**
43311@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
43312 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
43313 ASSERTCMP(atomic_read(&op->usage), >, 0);
43314
43315- fscache_stat(&fscache_n_op_enqueue);
43316+ fscache_stat_unchecked(&fscache_n_op_enqueue);
43317 switch (op->flags & FSCACHE_OP_TYPE) {
43318 case FSCACHE_OP_ASYNC:
43319 _debug("queue async");
43320@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
43321 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
43322 if (op->processor)
43323 fscache_enqueue_operation(op);
43324- fscache_stat(&fscache_n_op_run);
43325+ fscache_stat_unchecked(&fscache_n_op_run);
43326 }
43327
43328 /*
43329@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43330 if (object->n_ops > 1) {
43331 atomic_inc(&op->usage);
43332 list_add_tail(&op->pend_link, &object->pending_ops);
43333- fscache_stat(&fscache_n_op_pend);
43334+ fscache_stat_unchecked(&fscache_n_op_pend);
43335 } else if (!list_empty(&object->pending_ops)) {
43336 atomic_inc(&op->usage);
43337 list_add_tail(&op->pend_link, &object->pending_ops);
43338- fscache_stat(&fscache_n_op_pend);
43339+ fscache_stat_unchecked(&fscache_n_op_pend);
43340 fscache_start_operations(object);
43341 } else {
43342 ASSERTCMP(object->n_in_progress, ==, 0);
43343@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
43344 object->n_exclusive++; /* reads and writes must wait */
43345 atomic_inc(&op->usage);
43346 list_add_tail(&op->pend_link, &object->pending_ops);
43347- fscache_stat(&fscache_n_op_pend);
43348+ fscache_stat_unchecked(&fscache_n_op_pend);
43349 ret = 0;
43350 } else {
43351 /* not allowed to submit ops in any other state */
43352@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
43353 if (object->n_exclusive > 0) {
43354 atomic_inc(&op->usage);
43355 list_add_tail(&op->pend_link, &object->pending_ops);
43356- fscache_stat(&fscache_n_op_pend);
43357+ fscache_stat_unchecked(&fscache_n_op_pend);
43358 } else if (!list_empty(&object->pending_ops)) {
43359 atomic_inc(&op->usage);
43360 list_add_tail(&op->pend_link, &object->pending_ops);
43361- fscache_stat(&fscache_n_op_pend);
43362+ fscache_stat_unchecked(&fscache_n_op_pend);
43363 fscache_start_operations(object);
43364 } else {
43365 ASSERTCMP(object->n_exclusive, ==, 0);
43366@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
43367 object->n_ops++;
43368 atomic_inc(&op->usage);
43369 list_add_tail(&op->pend_link, &object->pending_ops);
43370- fscache_stat(&fscache_n_op_pend);
43371+ fscache_stat_unchecked(&fscache_n_op_pend);
43372 ret = 0;
43373 } else if (object->state == FSCACHE_OBJECT_DYING ||
43374 object->state == FSCACHE_OBJECT_LC_DYING ||
43375 object->state == FSCACHE_OBJECT_WITHDRAWING) {
43376- fscache_stat(&fscache_n_op_rejected);
43377+ fscache_stat_unchecked(&fscache_n_op_rejected);
43378 ret = -ENOBUFS;
43379 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
43380 fscache_report_unexpected_submission(object, op, ostate);
43381@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
43382
43383 ret = -EBUSY;
43384 if (!list_empty(&op->pend_link)) {
43385- fscache_stat(&fscache_n_op_cancelled);
43386+ fscache_stat_unchecked(&fscache_n_op_cancelled);
43387 list_del_init(&op->pend_link);
43388 object->n_ops--;
43389 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
43390@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
43391 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
43392 BUG();
43393
43394- fscache_stat(&fscache_n_op_release);
43395+ fscache_stat_unchecked(&fscache_n_op_release);
43396
43397 if (op->release) {
43398 op->release(op);
43399@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
43400 * lock, and defer it otherwise */
43401 if (!spin_trylock(&object->lock)) {
43402 _debug("defer put");
43403- fscache_stat(&fscache_n_op_deferred_release);
43404+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
43405
43406 cache = object->cache;
43407 spin_lock(&cache->op_gc_list_lock);
43408@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
43409
43410 _debug("GC DEFERRED REL OBJ%x OP%x",
43411 object->debug_id, op->debug_id);
43412- fscache_stat(&fscache_n_op_gc);
43413+ fscache_stat_unchecked(&fscache_n_op_gc);
43414
43415 ASSERTCMP(atomic_read(&op->usage), ==, 0);
43416
43417diff --git a/fs/fscache/page.c b/fs/fscache/page.c
43418index 3f7a59b..cf196cc 100644
43419--- a/fs/fscache/page.c
43420+++ b/fs/fscache/page.c
43421@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43422 val = radix_tree_lookup(&cookie->stores, page->index);
43423 if (!val) {
43424 rcu_read_unlock();
43425- fscache_stat(&fscache_n_store_vmscan_not_storing);
43426+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
43427 __fscache_uncache_page(cookie, page);
43428 return true;
43429 }
43430@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
43431 spin_unlock(&cookie->stores_lock);
43432
43433 if (xpage) {
43434- fscache_stat(&fscache_n_store_vmscan_cancelled);
43435- fscache_stat(&fscache_n_store_radix_deletes);
43436+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
43437+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43438 ASSERTCMP(xpage, ==, page);
43439 } else {
43440- fscache_stat(&fscache_n_store_vmscan_gone);
43441+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
43442 }
43443
43444 wake_up_bit(&cookie->flags, 0);
43445@@ -107,7 +107,7 @@ page_busy:
43446 /* we might want to wait here, but that could deadlock the allocator as
43447 * the work threads writing to the cache may all end up sleeping
43448 * on memory allocation */
43449- fscache_stat(&fscache_n_store_vmscan_busy);
43450+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
43451 return false;
43452 }
43453 EXPORT_SYMBOL(__fscache_maybe_release_page);
43454@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
43455 FSCACHE_COOKIE_STORING_TAG);
43456 if (!radix_tree_tag_get(&cookie->stores, page->index,
43457 FSCACHE_COOKIE_PENDING_TAG)) {
43458- fscache_stat(&fscache_n_store_radix_deletes);
43459+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
43460 xpage = radix_tree_delete(&cookie->stores, page->index);
43461 }
43462 spin_unlock(&cookie->stores_lock);
43463@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
43464
43465 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
43466
43467- fscache_stat(&fscache_n_attr_changed_calls);
43468+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
43469
43470 if (fscache_object_is_active(object)) {
43471 fscache_stat(&fscache_n_cop_attr_changed);
43472@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43473
43474 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43475
43476- fscache_stat(&fscache_n_attr_changed);
43477+ fscache_stat_unchecked(&fscache_n_attr_changed);
43478
43479 op = kzalloc(sizeof(*op), GFP_KERNEL);
43480 if (!op) {
43481- fscache_stat(&fscache_n_attr_changed_nomem);
43482+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
43483 _leave(" = -ENOMEM");
43484 return -ENOMEM;
43485 }
43486@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43487 if (fscache_submit_exclusive_op(object, op) < 0)
43488 goto nobufs;
43489 spin_unlock(&cookie->lock);
43490- fscache_stat(&fscache_n_attr_changed_ok);
43491+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
43492 fscache_put_operation(op);
43493 _leave(" = 0");
43494 return 0;
43495@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
43496 nobufs:
43497 spin_unlock(&cookie->lock);
43498 kfree(op);
43499- fscache_stat(&fscache_n_attr_changed_nobufs);
43500+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
43501 _leave(" = %d", -ENOBUFS);
43502 return -ENOBUFS;
43503 }
43504@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
43505 /* allocate a retrieval operation and attempt to submit it */
43506 op = kzalloc(sizeof(*op), GFP_NOIO);
43507 if (!op) {
43508- fscache_stat(&fscache_n_retrievals_nomem);
43509+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43510 return NULL;
43511 }
43512
43513@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43514 return 0;
43515 }
43516
43517- fscache_stat(&fscache_n_retrievals_wait);
43518+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
43519
43520 jif = jiffies;
43521 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
43522 fscache_wait_bit_interruptible,
43523 TASK_INTERRUPTIBLE) != 0) {
43524- fscache_stat(&fscache_n_retrievals_intr);
43525+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43526 _leave(" = -ERESTARTSYS");
43527 return -ERESTARTSYS;
43528 }
43529@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
43530 */
43531 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43532 struct fscache_retrieval *op,
43533- atomic_t *stat_op_waits,
43534- atomic_t *stat_object_dead)
43535+ atomic_unchecked_t *stat_op_waits,
43536+ atomic_unchecked_t *stat_object_dead)
43537 {
43538 int ret;
43539
43540@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43541 goto check_if_dead;
43542
43543 _debug(">>> WT");
43544- fscache_stat(stat_op_waits);
43545+ fscache_stat_unchecked(stat_op_waits);
43546 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
43547 fscache_wait_bit_interruptible,
43548 TASK_INTERRUPTIBLE) < 0) {
43549@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
43550
43551 check_if_dead:
43552 if (unlikely(fscache_object_is_dead(object))) {
43553- fscache_stat(stat_object_dead);
43554+ fscache_stat_unchecked(stat_object_dead);
43555 return -ENOBUFS;
43556 }
43557 return 0;
43558@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43559
43560 _enter("%p,%p,,,", cookie, page);
43561
43562- fscache_stat(&fscache_n_retrievals);
43563+ fscache_stat_unchecked(&fscache_n_retrievals);
43564
43565 if (hlist_empty(&cookie->backing_objects))
43566 goto nobufs;
43567@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43568 goto nobufs_unlock;
43569 spin_unlock(&cookie->lock);
43570
43571- fscache_stat(&fscache_n_retrieval_ops);
43572+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43573
43574 /* pin the netfs read context in case we need to do the actual netfs
43575 * read because we've encountered a cache read failure */
43576@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
43577
43578 error:
43579 if (ret == -ENOMEM)
43580- fscache_stat(&fscache_n_retrievals_nomem);
43581+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43582 else if (ret == -ERESTARTSYS)
43583- fscache_stat(&fscache_n_retrievals_intr);
43584+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43585 else if (ret == -ENODATA)
43586- fscache_stat(&fscache_n_retrievals_nodata);
43587+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43588 else if (ret < 0)
43589- fscache_stat(&fscache_n_retrievals_nobufs);
43590+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43591 else
43592- fscache_stat(&fscache_n_retrievals_ok);
43593+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43594
43595 fscache_put_retrieval(op);
43596 _leave(" = %d", ret);
43597@@ -429,7 +429,7 @@ nobufs_unlock:
43598 spin_unlock(&cookie->lock);
43599 kfree(op);
43600 nobufs:
43601- fscache_stat(&fscache_n_retrievals_nobufs);
43602+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43603 _leave(" = -ENOBUFS");
43604 return -ENOBUFS;
43605 }
43606@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43607
43608 _enter("%p,,%d,,,", cookie, *nr_pages);
43609
43610- fscache_stat(&fscache_n_retrievals);
43611+ fscache_stat_unchecked(&fscache_n_retrievals);
43612
43613 if (hlist_empty(&cookie->backing_objects))
43614 goto nobufs;
43615@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43616 goto nobufs_unlock;
43617 spin_unlock(&cookie->lock);
43618
43619- fscache_stat(&fscache_n_retrieval_ops);
43620+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
43621
43622 /* pin the netfs read context in case we need to do the actual netfs
43623 * read because we've encountered a cache read failure */
43624@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
43625
43626 error:
43627 if (ret == -ENOMEM)
43628- fscache_stat(&fscache_n_retrievals_nomem);
43629+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
43630 else if (ret == -ERESTARTSYS)
43631- fscache_stat(&fscache_n_retrievals_intr);
43632+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
43633 else if (ret == -ENODATA)
43634- fscache_stat(&fscache_n_retrievals_nodata);
43635+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
43636 else if (ret < 0)
43637- fscache_stat(&fscache_n_retrievals_nobufs);
43638+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43639 else
43640- fscache_stat(&fscache_n_retrievals_ok);
43641+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
43642
43643 fscache_put_retrieval(op);
43644 _leave(" = %d", ret);
43645@@ -545,7 +545,7 @@ nobufs_unlock:
43646 spin_unlock(&cookie->lock);
43647 kfree(op);
43648 nobufs:
43649- fscache_stat(&fscache_n_retrievals_nobufs);
43650+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43651 _leave(" = -ENOBUFS");
43652 return -ENOBUFS;
43653 }
43654@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43655
43656 _enter("%p,%p,,,", cookie, page);
43657
43658- fscache_stat(&fscache_n_allocs);
43659+ fscache_stat_unchecked(&fscache_n_allocs);
43660
43661 if (hlist_empty(&cookie->backing_objects))
43662 goto nobufs;
43663@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43664 goto nobufs_unlock;
43665 spin_unlock(&cookie->lock);
43666
43667- fscache_stat(&fscache_n_alloc_ops);
43668+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43669
43670 ret = fscache_wait_for_retrieval_activation(
43671 object, op,
43672@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
43673
43674 error:
43675 if (ret == -ERESTARTSYS)
43676- fscache_stat(&fscache_n_allocs_intr);
43677+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43678 else if (ret < 0)
43679- fscache_stat(&fscache_n_allocs_nobufs);
43680+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43681 else
43682- fscache_stat(&fscache_n_allocs_ok);
43683+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43684
43685 fscache_put_retrieval(op);
43686 _leave(" = %d", ret);
43687@@ -625,7 +625,7 @@ nobufs_unlock:
43688 spin_unlock(&cookie->lock);
43689 kfree(op);
43690 nobufs:
43691- fscache_stat(&fscache_n_allocs_nobufs);
43692+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43693 _leave(" = -ENOBUFS");
43694 return -ENOBUFS;
43695 }
43696@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43697
43698 spin_lock(&cookie->stores_lock);
43699
43700- fscache_stat(&fscache_n_store_calls);
43701+ fscache_stat_unchecked(&fscache_n_store_calls);
43702
43703 /* find a page to store */
43704 page = NULL;
43705@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43706 page = results[0];
43707 _debug("gang %d [%lx]", n, page->index);
43708 if (page->index > op->store_limit) {
43709- fscache_stat(&fscache_n_store_pages_over_limit);
43710+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43711 goto superseded;
43712 }
43713
43714@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
43715 spin_unlock(&cookie->stores_lock);
43716 spin_unlock(&object->lock);
43717
43718- fscache_stat(&fscache_n_store_pages);
43719+ fscache_stat_unchecked(&fscache_n_store_pages);
43720 fscache_stat(&fscache_n_cop_write_page);
43721 ret = object->cache->ops->write_page(op, page);
43722 fscache_stat_d(&fscache_n_cop_write_page);
43723@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43724 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43725 ASSERT(PageFsCache(page));
43726
43727- fscache_stat(&fscache_n_stores);
43728+ fscache_stat_unchecked(&fscache_n_stores);
43729
43730 op = kzalloc(sizeof(*op), GFP_NOIO);
43731 if (!op)
43732@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43733 spin_unlock(&cookie->stores_lock);
43734 spin_unlock(&object->lock);
43735
43736- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43737+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43738 op->store_limit = object->store_limit;
43739
43740 if (fscache_submit_op(object, &op->op) < 0)
43741@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43742
43743 spin_unlock(&cookie->lock);
43744 radix_tree_preload_end();
43745- fscache_stat(&fscache_n_store_ops);
43746- fscache_stat(&fscache_n_stores_ok);
43747+ fscache_stat_unchecked(&fscache_n_store_ops);
43748+ fscache_stat_unchecked(&fscache_n_stores_ok);
43749
43750 /* the work queue now carries its own ref on the object */
43751 fscache_put_operation(&op->op);
43752@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
43753 return 0;
43754
43755 already_queued:
43756- fscache_stat(&fscache_n_stores_again);
43757+ fscache_stat_unchecked(&fscache_n_stores_again);
43758 already_pending:
43759 spin_unlock(&cookie->stores_lock);
43760 spin_unlock(&object->lock);
43761 spin_unlock(&cookie->lock);
43762 radix_tree_preload_end();
43763 kfree(op);
43764- fscache_stat(&fscache_n_stores_ok);
43765+ fscache_stat_unchecked(&fscache_n_stores_ok);
43766 _leave(" = 0");
43767 return 0;
43768
43769@@ -851,14 +851,14 @@ nobufs:
43770 spin_unlock(&cookie->lock);
43771 radix_tree_preload_end();
43772 kfree(op);
43773- fscache_stat(&fscache_n_stores_nobufs);
43774+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43775 _leave(" = -ENOBUFS");
43776 return -ENOBUFS;
43777
43778 nomem_free:
43779 kfree(op);
43780 nomem:
43781- fscache_stat(&fscache_n_stores_oom);
43782+ fscache_stat_unchecked(&fscache_n_stores_oom);
43783 _leave(" = -ENOMEM");
43784 return -ENOMEM;
43785 }
43786@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
43787 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43788 ASSERTCMP(page, !=, NULL);
43789
43790- fscache_stat(&fscache_n_uncaches);
43791+ fscache_stat_unchecked(&fscache_n_uncaches);
43792
43793 /* cache withdrawal may beat us to it */
43794 if (!PageFsCache(page))
43795@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
43796 unsigned long loop;
43797
43798 #ifdef CONFIG_FSCACHE_STATS
43799- atomic_add(pagevec->nr, &fscache_n_marks);
43800+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43801 #endif
43802
43803 for (loop = 0; loop < pagevec->nr; loop++) {
43804diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
43805index 4765190..2a067f2 100644
43806--- a/fs/fscache/stats.c
43807+++ b/fs/fscache/stats.c
43808@@ -18,95 +18,95 @@
43809 /*
43810 * operation counters
43811 */
43812-atomic_t fscache_n_op_pend;
43813-atomic_t fscache_n_op_run;
43814-atomic_t fscache_n_op_enqueue;
43815-atomic_t fscache_n_op_requeue;
43816-atomic_t fscache_n_op_deferred_release;
43817-atomic_t fscache_n_op_release;
43818-atomic_t fscache_n_op_gc;
43819-atomic_t fscache_n_op_cancelled;
43820-atomic_t fscache_n_op_rejected;
43821+atomic_unchecked_t fscache_n_op_pend;
43822+atomic_unchecked_t fscache_n_op_run;
43823+atomic_unchecked_t fscache_n_op_enqueue;
43824+atomic_unchecked_t fscache_n_op_requeue;
43825+atomic_unchecked_t fscache_n_op_deferred_release;
43826+atomic_unchecked_t fscache_n_op_release;
43827+atomic_unchecked_t fscache_n_op_gc;
43828+atomic_unchecked_t fscache_n_op_cancelled;
43829+atomic_unchecked_t fscache_n_op_rejected;
43830
43831-atomic_t fscache_n_attr_changed;
43832-atomic_t fscache_n_attr_changed_ok;
43833-atomic_t fscache_n_attr_changed_nobufs;
43834-atomic_t fscache_n_attr_changed_nomem;
43835-atomic_t fscache_n_attr_changed_calls;
43836+atomic_unchecked_t fscache_n_attr_changed;
43837+atomic_unchecked_t fscache_n_attr_changed_ok;
43838+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43839+atomic_unchecked_t fscache_n_attr_changed_nomem;
43840+atomic_unchecked_t fscache_n_attr_changed_calls;
43841
43842-atomic_t fscache_n_allocs;
43843-atomic_t fscache_n_allocs_ok;
43844-atomic_t fscache_n_allocs_wait;
43845-atomic_t fscache_n_allocs_nobufs;
43846-atomic_t fscache_n_allocs_intr;
43847-atomic_t fscache_n_allocs_object_dead;
43848-atomic_t fscache_n_alloc_ops;
43849-atomic_t fscache_n_alloc_op_waits;
43850+atomic_unchecked_t fscache_n_allocs;
43851+atomic_unchecked_t fscache_n_allocs_ok;
43852+atomic_unchecked_t fscache_n_allocs_wait;
43853+atomic_unchecked_t fscache_n_allocs_nobufs;
43854+atomic_unchecked_t fscache_n_allocs_intr;
43855+atomic_unchecked_t fscache_n_allocs_object_dead;
43856+atomic_unchecked_t fscache_n_alloc_ops;
43857+atomic_unchecked_t fscache_n_alloc_op_waits;
43858
43859-atomic_t fscache_n_retrievals;
43860-atomic_t fscache_n_retrievals_ok;
43861-atomic_t fscache_n_retrievals_wait;
43862-atomic_t fscache_n_retrievals_nodata;
43863-atomic_t fscache_n_retrievals_nobufs;
43864-atomic_t fscache_n_retrievals_intr;
43865-atomic_t fscache_n_retrievals_nomem;
43866-atomic_t fscache_n_retrievals_object_dead;
43867-atomic_t fscache_n_retrieval_ops;
43868-atomic_t fscache_n_retrieval_op_waits;
43869+atomic_unchecked_t fscache_n_retrievals;
43870+atomic_unchecked_t fscache_n_retrievals_ok;
43871+atomic_unchecked_t fscache_n_retrievals_wait;
43872+atomic_unchecked_t fscache_n_retrievals_nodata;
43873+atomic_unchecked_t fscache_n_retrievals_nobufs;
43874+atomic_unchecked_t fscache_n_retrievals_intr;
43875+atomic_unchecked_t fscache_n_retrievals_nomem;
43876+atomic_unchecked_t fscache_n_retrievals_object_dead;
43877+atomic_unchecked_t fscache_n_retrieval_ops;
43878+atomic_unchecked_t fscache_n_retrieval_op_waits;
43879
43880-atomic_t fscache_n_stores;
43881-atomic_t fscache_n_stores_ok;
43882-atomic_t fscache_n_stores_again;
43883-atomic_t fscache_n_stores_nobufs;
43884-atomic_t fscache_n_stores_oom;
43885-atomic_t fscache_n_store_ops;
43886-atomic_t fscache_n_store_calls;
43887-atomic_t fscache_n_store_pages;
43888-atomic_t fscache_n_store_radix_deletes;
43889-atomic_t fscache_n_store_pages_over_limit;
43890+atomic_unchecked_t fscache_n_stores;
43891+atomic_unchecked_t fscache_n_stores_ok;
43892+atomic_unchecked_t fscache_n_stores_again;
43893+atomic_unchecked_t fscache_n_stores_nobufs;
43894+atomic_unchecked_t fscache_n_stores_oom;
43895+atomic_unchecked_t fscache_n_store_ops;
43896+atomic_unchecked_t fscache_n_store_calls;
43897+atomic_unchecked_t fscache_n_store_pages;
43898+atomic_unchecked_t fscache_n_store_radix_deletes;
43899+atomic_unchecked_t fscache_n_store_pages_over_limit;
43900
43901-atomic_t fscache_n_store_vmscan_not_storing;
43902-atomic_t fscache_n_store_vmscan_gone;
43903-atomic_t fscache_n_store_vmscan_busy;
43904-atomic_t fscache_n_store_vmscan_cancelled;
43905+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43906+atomic_unchecked_t fscache_n_store_vmscan_gone;
43907+atomic_unchecked_t fscache_n_store_vmscan_busy;
43908+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43909
43910-atomic_t fscache_n_marks;
43911-atomic_t fscache_n_uncaches;
43912+atomic_unchecked_t fscache_n_marks;
43913+atomic_unchecked_t fscache_n_uncaches;
43914
43915-atomic_t fscache_n_acquires;
43916-atomic_t fscache_n_acquires_null;
43917-atomic_t fscache_n_acquires_no_cache;
43918-atomic_t fscache_n_acquires_ok;
43919-atomic_t fscache_n_acquires_nobufs;
43920-atomic_t fscache_n_acquires_oom;
43921+atomic_unchecked_t fscache_n_acquires;
43922+atomic_unchecked_t fscache_n_acquires_null;
43923+atomic_unchecked_t fscache_n_acquires_no_cache;
43924+atomic_unchecked_t fscache_n_acquires_ok;
43925+atomic_unchecked_t fscache_n_acquires_nobufs;
43926+atomic_unchecked_t fscache_n_acquires_oom;
43927
43928-atomic_t fscache_n_updates;
43929-atomic_t fscache_n_updates_null;
43930-atomic_t fscache_n_updates_run;
43931+atomic_unchecked_t fscache_n_updates;
43932+atomic_unchecked_t fscache_n_updates_null;
43933+atomic_unchecked_t fscache_n_updates_run;
43934
43935-atomic_t fscache_n_relinquishes;
43936-atomic_t fscache_n_relinquishes_null;
43937-atomic_t fscache_n_relinquishes_waitcrt;
43938-atomic_t fscache_n_relinquishes_retire;
43939+atomic_unchecked_t fscache_n_relinquishes;
43940+atomic_unchecked_t fscache_n_relinquishes_null;
43941+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43942+atomic_unchecked_t fscache_n_relinquishes_retire;
43943
43944-atomic_t fscache_n_cookie_index;
43945-atomic_t fscache_n_cookie_data;
43946-atomic_t fscache_n_cookie_special;
43947+atomic_unchecked_t fscache_n_cookie_index;
43948+atomic_unchecked_t fscache_n_cookie_data;
43949+atomic_unchecked_t fscache_n_cookie_special;
43950
43951-atomic_t fscache_n_object_alloc;
43952-atomic_t fscache_n_object_no_alloc;
43953-atomic_t fscache_n_object_lookups;
43954-atomic_t fscache_n_object_lookups_negative;
43955-atomic_t fscache_n_object_lookups_positive;
43956-atomic_t fscache_n_object_lookups_timed_out;
43957-atomic_t fscache_n_object_created;
43958-atomic_t fscache_n_object_avail;
43959-atomic_t fscache_n_object_dead;
43960+atomic_unchecked_t fscache_n_object_alloc;
43961+atomic_unchecked_t fscache_n_object_no_alloc;
43962+atomic_unchecked_t fscache_n_object_lookups;
43963+atomic_unchecked_t fscache_n_object_lookups_negative;
43964+atomic_unchecked_t fscache_n_object_lookups_positive;
43965+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43966+atomic_unchecked_t fscache_n_object_created;
43967+atomic_unchecked_t fscache_n_object_avail;
43968+atomic_unchecked_t fscache_n_object_dead;
43969
43970-atomic_t fscache_n_checkaux_none;
43971-atomic_t fscache_n_checkaux_okay;
43972-atomic_t fscache_n_checkaux_update;
43973-atomic_t fscache_n_checkaux_obsolete;
43974+atomic_unchecked_t fscache_n_checkaux_none;
43975+atomic_unchecked_t fscache_n_checkaux_okay;
43976+atomic_unchecked_t fscache_n_checkaux_update;
43977+atomic_unchecked_t fscache_n_checkaux_obsolete;
43978
43979 atomic_t fscache_n_cop_alloc_object;
43980 atomic_t fscache_n_cop_lookup_object;
43981@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
43982 seq_puts(m, "FS-Cache statistics\n");
43983
43984 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43985- atomic_read(&fscache_n_cookie_index),
43986- atomic_read(&fscache_n_cookie_data),
43987- atomic_read(&fscache_n_cookie_special));
43988+ atomic_read_unchecked(&fscache_n_cookie_index),
43989+ atomic_read_unchecked(&fscache_n_cookie_data),
43990+ atomic_read_unchecked(&fscache_n_cookie_special));
43991
43992 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43993- atomic_read(&fscache_n_object_alloc),
43994- atomic_read(&fscache_n_object_no_alloc),
43995- atomic_read(&fscache_n_object_avail),
43996- atomic_read(&fscache_n_object_dead));
43997+ atomic_read_unchecked(&fscache_n_object_alloc),
43998+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43999+ atomic_read_unchecked(&fscache_n_object_avail),
44000+ atomic_read_unchecked(&fscache_n_object_dead));
44001 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
44002- atomic_read(&fscache_n_checkaux_none),
44003- atomic_read(&fscache_n_checkaux_okay),
44004- atomic_read(&fscache_n_checkaux_update),
44005- atomic_read(&fscache_n_checkaux_obsolete));
44006+ atomic_read_unchecked(&fscache_n_checkaux_none),
44007+ atomic_read_unchecked(&fscache_n_checkaux_okay),
44008+ atomic_read_unchecked(&fscache_n_checkaux_update),
44009+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
44010
44011 seq_printf(m, "Pages : mrk=%u unc=%u\n",
44012- atomic_read(&fscache_n_marks),
44013- atomic_read(&fscache_n_uncaches));
44014+ atomic_read_unchecked(&fscache_n_marks),
44015+ atomic_read_unchecked(&fscache_n_uncaches));
44016
44017 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
44018 " oom=%u\n",
44019- atomic_read(&fscache_n_acquires),
44020- atomic_read(&fscache_n_acquires_null),
44021- atomic_read(&fscache_n_acquires_no_cache),
44022- atomic_read(&fscache_n_acquires_ok),
44023- atomic_read(&fscache_n_acquires_nobufs),
44024- atomic_read(&fscache_n_acquires_oom));
44025+ atomic_read_unchecked(&fscache_n_acquires),
44026+ atomic_read_unchecked(&fscache_n_acquires_null),
44027+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
44028+ atomic_read_unchecked(&fscache_n_acquires_ok),
44029+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
44030+ atomic_read_unchecked(&fscache_n_acquires_oom));
44031
44032 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
44033- atomic_read(&fscache_n_object_lookups),
44034- atomic_read(&fscache_n_object_lookups_negative),
44035- atomic_read(&fscache_n_object_lookups_positive),
44036- atomic_read(&fscache_n_object_created),
44037- atomic_read(&fscache_n_object_lookups_timed_out));
44038+ atomic_read_unchecked(&fscache_n_object_lookups),
44039+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
44040+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
44041+ atomic_read_unchecked(&fscache_n_object_created),
44042+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
44043
44044 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
44045- atomic_read(&fscache_n_updates),
44046- atomic_read(&fscache_n_updates_null),
44047- atomic_read(&fscache_n_updates_run));
44048+ atomic_read_unchecked(&fscache_n_updates),
44049+ atomic_read_unchecked(&fscache_n_updates_null),
44050+ atomic_read_unchecked(&fscache_n_updates_run));
44051
44052 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
44053- atomic_read(&fscache_n_relinquishes),
44054- atomic_read(&fscache_n_relinquishes_null),
44055- atomic_read(&fscache_n_relinquishes_waitcrt),
44056- atomic_read(&fscache_n_relinquishes_retire));
44057+ atomic_read_unchecked(&fscache_n_relinquishes),
44058+ atomic_read_unchecked(&fscache_n_relinquishes_null),
44059+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
44060+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
44061
44062 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
44063- atomic_read(&fscache_n_attr_changed),
44064- atomic_read(&fscache_n_attr_changed_ok),
44065- atomic_read(&fscache_n_attr_changed_nobufs),
44066- atomic_read(&fscache_n_attr_changed_nomem),
44067- atomic_read(&fscache_n_attr_changed_calls));
44068+ atomic_read_unchecked(&fscache_n_attr_changed),
44069+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
44070+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
44071+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
44072+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
44073
44074 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
44075- atomic_read(&fscache_n_allocs),
44076- atomic_read(&fscache_n_allocs_ok),
44077- atomic_read(&fscache_n_allocs_wait),
44078- atomic_read(&fscache_n_allocs_nobufs),
44079- atomic_read(&fscache_n_allocs_intr));
44080+ atomic_read_unchecked(&fscache_n_allocs),
44081+ atomic_read_unchecked(&fscache_n_allocs_ok),
44082+ atomic_read_unchecked(&fscache_n_allocs_wait),
44083+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
44084+ atomic_read_unchecked(&fscache_n_allocs_intr));
44085 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
44086- atomic_read(&fscache_n_alloc_ops),
44087- atomic_read(&fscache_n_alloc_op_waits),
44088- atomic_read(&fscache_n_allocs_object_dead));
44089+ atomic_read_unchecked(&fscache_n_alloc_ops),
44090+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
44091+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
44092
44093 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
44094 " int=%u oom=%u\n",
44095- atomic_read(&fscache_n_retrievals),
44096- atomic_read(&fscache_n_retrievals_ok),
44097- atomic_read(&fscache_n_retrievals_wait),
44098- atomic_read(&fscache_n_retrievals_nodata),
44099- atomic_read(&fscache_n_retrievals_nobufs),
44100- atomic_read(&fscache_n_retrievals_intr),
44101- atomic_read(&fscache_n_retrievals_nomem));
44102+ atomic_read_unchecked(&fscache_n_retrievals),
44103+ atomic_read_unchecked(&fscache_n_retrievals_ok),
44104+ atomic_read_unchecked(&fscache_n_retrievals_wait),
44105+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
44106+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
44107+ atomic_read_unchecked(&fscache_n_retrievals_intr),
44108+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
44109 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
44110- atomic_read(&fscache_n_retrieval_ops),
44111- atomic_read(&fscache_n_retrieval_op_waits),
44112- atomic_read(&fscache_n_retrievals_object_dead));
44113+ atomic_read_unchecked(&fscache_n_retrieval_ops),
44114+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
44115+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
44116
44117 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
44118- atomic_read(&fscache_n_stores),
44119- atomic_read(&fscache_n_stores_ok),
44120- atomic_read(&fscache_n_stores_again),
44121- atomic_read(&fscache_n_stores_nobufs),
44122- atomic_read(&fscache_n_stores_oom));
44123+ atomic_read_unchecked(&fscache_n_stores),
44124+ atomic_read_unchecked(&fscache_n_stores_ok),
44125+ atomic_read_unchecked(&fscache_n_stores_again),
44126+ atomic_read_unchecked(&fscache_n_stores_nobufs),
44127+ atomic_read_unchecked(&fscache_n_stores_oom));
44128 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
44129- atomic_read(&fscache_n_store_ops),
44130- atomic_read(&fscache_n_store_calls),
44131- atomic_read(&fscache_n_store_pages),
44132- atomic_read(&fscache_n_store_radix_deletes),
44133- atomic_read(&fscache_n_store_pages_over_limit));
44134+ atomic_read_unchecked(&fscache_n_store_ops),
44135+ atomic_read_unchecked(&fscache_n_store_calls),
44136+ atomic_read_unchecked(&fscache_n_store_pages),
44137+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
44138+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
44139
44140 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
44141- atomic_read(&fscache_n_store_vmscan_not_storing),
44142- atomic_read(&fscache_n_store_vmscan_gone),
44143- atomic_read(&fscache_n_store_vmscan_busy),
44144- atomic_read(&fscache_n_store_vmscan_cancelled));
44145+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
44146+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
44147+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
44148+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
44149
44150 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
44151- atomic_read(&fscache_n_op_pend),
44152- atomic_read(&fscache_n_op_run),
44153- atomic_read(&fscache_n_op_enqueue),
44154- atomic_read(&fscache_n_op_cancelled),
44155- atomic_read(&fscache_n_op_rejected));
44156+ atomic_read_unchecked(&fscache_n_op_pend),
44157+ atomic_read_unchecked(&fscache_n_op_run),
44158+ atomic_read_unchecked(&fscache_n_op_enqueue),
44159+ atomic_read_unchecked(&fscache_n_op_cancelled),
44160+ atomic_read_unchecked(&fscache_n_op_rejected));
44161 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
44162- atomic_read(&fscache_n_op_deferred_release),
44163- atomic_read(&fscache_n_op_release),
44164- atomic_read(&fscache_n_op_gc));
44165+ atomic_read_unchecked(&fscache_n_op_deferred_release),
44166+ atomic_read_unchecked(&fscache_n_op_release),
44167+ atomic_read_unchecked(&fscache_n_op_gc));
44168
44169 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
44170 atomic_read(&fscache_n_cop_alloc_object),
44171diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
44172index 3426521..3b75162 100644
44173--- a/fs/fuse/cuse.c
44174+++ b/fs/fuse/cuse.c
44175@@ -587,10 +587,12 @@ static int __init cuse_init(void)
44176 INIT_LIST_HEAD(&cuse_conntbl[i]);
44177
44178 /* inherit and extend fuse_dev_operations */
44179- cuse_channel_fops = fuse_dev_operations;
44180- cuse_channel_fops.owner = THIS_MODULE;
44181- cuse_channel_fops.open = cuse_channel_open;
44182- cuse_channel_fops.release = cuse_channel_release;
44183+ pax_open_kernel();
44184+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
44185+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
44186+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
44187+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
44188+ pax_close_kernel();
44189
44190 cuse_class = class_create(THIS_MODULE, "cuse");
44191 if (IS_ERR(cuse_class))
44192diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
44193index 2aaf3ea..8e50863 100644
44194--- a/fs/fuse/dev.c
44195+++ b/fs/fuse/dev.c
44196@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
44197 ret = 0;
44198 pipe_lock(pipe);
44199
44200- if (!pipe->readers) {
44201+ if (!atomic_read(&pipe->readers)) {
44202 send_sig(SIGPIPE, current, 0);
44203 if (!ret)
44204 ret = -EPIPE;
44205diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
44206index 9f63e49..d8a64c0 100644
44207--- a/fs/fuse/dir.c
44208+++ b/fs/fuse/dir.c
44209@@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
44210 return link;
44211 }
44212
44213-static void free_link(char *link)
44214+static void free_link(const char *link)
44215 {
44216 if (!IS_ERR(link))
44217 free_page((unsigned long) link);
44218diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
44219index cfd4959..a780959 100644
44220--- a/fs/gfs2/inode.c
44221+++ b/fs/gfs2/inode.c
44222@@ -1490,7 +1490,7 @@ out:
44223
44224 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
44225 {
44226- char *s = nd_get_link(nd);
44227+ const char *s = nd_get_link(nd);
44228 if (!IS_ERR(s))
44229 kfree(s);
44230 }
44231diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
44232index 0be5a78..9cfb853 100644
44233--- a/fs/hugetlbfs/inode.c
44234+++ b/fs/hugetlbfs/inode.c
44235@@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
44236 .kill_sb = kill_litter_super,
44237 };
44238
44239-static struct vfsmount *hugetlbfs_vfsmount;
44240+struct vfsmount *hugetlbfs_vfsmount;
44241
44242 static int can_do_hugetlb_shm(void)
44243 {
44244diff --git a/fs/inode.c b/fs/inode.c
44245index ee4e66b..0451521 100644
44246--- a/fs/inode.c
44247+++ b/fs/inode.c
44248@@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
44249
44250 #ifdef CONFIG_SMP
44251 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
44252- static atomic_t shared_last_ino;
44253- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
44254+ static atomic_unchecked_t shared_last_ino;
44255+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
44256
44257 res = next - LAST_INO_BATCH;
44258 }
44259diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
44260index e513f19..2ab1351 100644
44261--- a/fs/jffs2/erase.c
44262+++ b/fs/jffs2/erase.c
44263@@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
44264 struct jffs2_unknown_node marker = {
44265 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
44266 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44267- .totlen = cpu_to_je32(c->cleanmarker_size)
44268+ .totlen = cpu_to_je32(c->cleanmarker_size),
44269+ .hdr_crc = cpu_to_je32(0)
44270 };
44271
44272 jffs2_prealloc_raw_node_refs(c, jeb, 1);
44273diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
44274index b09e51d..e482afa 100644
44275--- a/fs/jffs2/wbuf.c
44276+++ b/fs/jffs2/wbuf.c
44277@@ -1011,7 +1011,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
44278 {
44279 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
44280 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
44281- .totlen = constant_cpu_to_je32(8)
44282+ .totlen = constant_cpu_to_je32(8),
44283+ .hdr_crc = constant_cpu_to_je32(0)
44284 };
44285
44286 /*
44287diff --git a/fs/jfs/super.c b/fs/jfs/super.c
44288index a44eff0..462e07d 100644
44289--- a/fs/jfs/super.c
44290+++ b/fs/jfs/super.c
44291@@ -802,7 +802,7 @@ static int __init init_jfs_fs(void)
44292
44293 jfs_inode_cachep =
44294 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
44295- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
44296+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
44297 init_once);
44298 if (jfs_inode_cachep == NULL)
44299 return -ENOMEM;
44300diff --git a/fs/libfs.c b/fs/libfs.c
44301index f6d411e..e82a08d 100644
44302--- a/fs/libfs.c
44303+++ b/fs/libfs.c
44304@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44305
44306 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
44307 struct dentry *next;
44308+ char d_name[sizeof(next->d_iname)];
44309+ const unsigned char *name;
44310+
44311 next = list_entry(p, struct dentry, d_u.d_child);
44312 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
44313 if (!simple_positive(next)) {
44314@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
44315
44316 spin_unlock(&next->d_lock);
44317 spin_unlock(&dentry->d_lock);
44318- if (filldir(dirent, next->d_name.name,
44319+ name = next->d_name.name;
44320+ if (name == next->d_iname) {
44321+ memcpy(d_name, name, next->d_name.len);
44322+ name = d_name;
44323+ }
44324+ if (filldir(dirent, name,
44325 next->d_name.len, filp->f_pos,
44326 next->d_inode->i_ino,
44327 dt_type(next->d_inode)) < 0)
44328diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
44329index 8392cb8..80d6193 100644
44330--- a/fs/lockd/clntproc.c
44331+++ b/fs/lockd/clntproc.c
44332@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
44333 /*
44334 * Cookie counter for NLM requests
44335 */
44336-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44337+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44338
44339 void nlmclnt_next_cookie(struct nlm_cookie *c)
44340 {
44341- u32 cookie = atomic_inc_return(&nlm_cookie);
44342+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44343
44344 memcpy(c->data, &cookie, 4);
44345 c->len=4;
44346diff --git a/fs/locks.c b/fs/locks.c
44347index 637694b..f84a121 100644
44348--- a/fs/locks.c
44349+++ b/fs/locks.c
44350@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
44351 return;
44352
44353 if (filp->f_op && filp->f_op->flock) {
44354- struct file_lock fl = {
44355+ struct file_lock flock = {
44356 .fl_pid = current->tgid,
44357 .fl_file = filp,
44358 .fl_flags = FL_FLOCK,
44359 .fl_type = F_UNLCK,
44360 .fl_end = OFFSET_MAX,
44361 };
44362- filp->f_op->flock(filp, F_SETLKW, &fl);
44363- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44364- fl.fl_ops->fl_release_private(&fl);
44365+ filp->f_op->flock(filp, F_SETLKW, &flock);
44366+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44367+ flock.fl_ops->fl_release_private(&flock);
44368 }
44369
44370 lock_flocks();
44371diff --git a/fs/namei.c b/fs/namei.c
44372index 5008f01..90328a7 100644
44373--- a/fs/namei.c
44374+++ b/fs/namei.c
44375@@ -279,16 +279,32 @@ int generic_permission(struct inode *inode, int mask)
44376 if (ret != -EACCES)
44377 return ret;
44378
44379+#ifdef CONFIG_GRKERNSEC
44380+ /* we'll block if we have to log due to a denied capability use */
44381+ if (mask & MAY_NOT_BLOCK)
44382+ return -ECHILD;
44383+#endif
44384+
44385 if (S_ISDIR(inode->i_mode)) {
44386 /* DACs are overridable for directories */
44387- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44388- return 0;
44389 if (!(mask & MAY_WRITE))
44390- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44391+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44392+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44393 return 0;
44394+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44395+ return 0;
44396 return -EACCES;
44397 }
44398 /*
44399+ * Searching includes executable on directories, else just read.
44400+ */
44401+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44402+ if (mask == MAY_READ)
44403+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
44404+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44405+ return 0;
44406+
44407+ /*
44408 * Read/write DACs are always overridable.
44409 * Executable DACs are overridable when there is
44410 * at least one exec bit set.
44411@@ -297,14 +313,6 @@ int generic_permission(struct inode *inode, int mask)
44412 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
44413 return 0;
44414
44415- /*
44416- * Searching includes executable on directories, else just read.
44417- */
44418- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44419- if (mask == MAY_READ)
44420- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
44421- return 0;
44422-
44423 return -EACCES;
44424 }
44425
44426@@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
44427 return error;
44428 }
44429
44430+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
44431+ dentry->d_inode, dentry, nd->path.mnt)) {
44432+ error = -EACCES;
44433+ *p = ERR_PTR(error); /* no ->put_link(), please */
44434+ path_put(&nd->path);
44435+ return error;
44436+ }
44437+
44438 nd->last_type = LAST_BIND;
44439 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
44440 error = PTR_ERR(*p);
44441 if (!IS_ERR(*p)) {
44442- char *s = nd_get_link(nd);
44443+ const char *s = nd_get_link(nd);
44444 error = 0;
44445 if (s)
44446 error = __vfs_follow_link(nd, s);
44447@@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
44448 if (!err)
44449 err = complete_walk(nd);
44450
44451+ if (!(nd->flags & LOOKUP_PARENT)) {
44452+#ifdef CONFIG_GRKERNSEC
44453+ if (flags & LOOKUP_RCU) {
44454+ if (!err)
44455+ path_put(&nd->path);
44456+ err = -ECHILD;
44457+ } else
44458+#endif
44459+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44460+ if (!err)
44461+ path_put(&nd->path);
44462+ err = -ENOENT;
44463+ }
44464+ }
44465+
44466 if (!err && nd->flags & LOOKUP_DIRECTORY) {
44467 if (!nd->inode->i_op->lookup) {
44468 path_put(&nd->path);
44469@@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
44470 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
44471
44472 if (likely(!retval)) {
44473+ if (*name != '/' && nd->path.dentry && nd->inode) {
44474+#ifdef CONFIG_GRKERNSEC
44475+ if (flags & LOOKUP_RCU)
44476+ return -ECHILD;
44477+#endif
44478+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44479+ return -ENOENT;
44480+ }
44481+
44482 if (unlikely(!audit_dummy_context())) {
44483 if (nd->path.dentry && nd->inode)
44484 audit_inode(name, nd->path.dentry);
44485@@ -2046,6 +2086,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
44486 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
44487 return -EPERM;
44488
44489+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
44490+ return -EPERM;
44491+ if (gr_handle_rawio(inode))
44492+ return -EPERM;
44493+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
44494+ return -EACCES;
44495+
44496 return 0;
44497 }
44498
44499@@ -2107,6 +2154,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44500 error = complete_walk(nd);
44501 if (error)
44502 return ERR_PTR(error);
44503+#ifdef CONFIG_GRKERNSEC
44504+ if (nd->flags & LOOKUP_RCU) {
44505+ error = -ECHILD;
44506+ goto exit;
44507+ }
44508+#endif
44509+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44510+ error = -ENOENT;
44511+ goto exit;
44512+ }
44513 audit_inode(pathname, nd->path.dentry);
44514 if (open_flag & O_CREAT) {
44515 error = -EISDIR;
44516@@ -2117,6 +2174,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44517 error = complete_walk(nd);
44518 if (error)
44519 return ERR_PTR(error);
44520+#ifdef CONFIG_GRKERNSEC
44521+ if (nd->flags & LOOKUP_RCU) {
44522+ error = -ECHILD;
44523+ goto exit;
44524+ }
44525+#endif
44526+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
44527+ error = -ENOENT;
44528+ goto exit;
44529+ }
44530 audit_inode(pathname, dir);
44531 goto ok;
44532 }
44533@@ -2138,6 +2205,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44534 error = complete_walk(nd);
44535 if (error)
44536 return ERR_PTR(-ECHILD);
44537+#ifdef CONFIG_GRKERNSEC
44538+ if (nd->flags & LOOKUP_RCU) {
44539+ error = -ECHILD;
44540+ goto exit;
44541+ }
44542+#endif
44543+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44544+ error = -ENOENT;
44545+ goto exit;
44546+ }
44547
44548 error = -ENOTDIR;
44549 if (nd->flags & LOOKUP_DIRECTORY) {
44550@@ -2178,6 +2255,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44551 /* Negative dentry, just create the file */
44552 if (!dentry->d_inode) {
44553 int mode = op->mode;
44554+
44555+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
44556+ error = -EACCES;
44557+ goto exit_mutex_unlock;
44558+ }
44559+
44560 if (!IS_POSIXACL(dir->d_inode))
44561 mode &= ~current_umask();
44562 /*
44563@@ -2201,6 +2284,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44564 error = vfs_create(dir->d_inode, dentry, mode, nd);
44565 if (error)
44566 goto exit_mutex_unlock;
44567+ else
44568+ gr_handle_create(path->dentry, path->mnt);
44569 mutex_unlock(&dir->d_inode->i_mutex);
44570 dput(nd->path.dentry);
44571 nd->path.dentry = dentry;
44572@@ -2210,6 +2295,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
44573 /*
44574 * It already exists.
44575 */
44576+
44577+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
44578+ error = -ENOENT;
44579+ goto exit_mutex_unlock;
44580+ }
44581+
44582+ /* only check if O_CREAT is specified, all other checks need to go
44583+ into may_open */
44584+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
44585+ error = -EACCES;
44586+ goto exit_mutex_unlock;
44587+ }
44588+
44589 mutex_unlock(&dir->d_inode->i_mutex);
44590 audit_inode(pathname, path->dentry);
44591
44592@@ -2422,6 +2520,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
44593 *path = nd.path;
44594 return dentry;
44595 eexist:
44596+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
44597+ dput(dentry);
44598+ dentry = ERR_PTR(-ENOENT);
44599+ goto fail;
44600+ }
44601 dput(dentry);
44602 dentry = ERR_PTR(-EEXIST);
44603 fail:
44604@@ -2444,6 +2547,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
44605 }
44606 EXPORT_SYMBOL(user_path_create);
44607
44608+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
44609+{
44610+ char *tmp = getname(pathname);
44611+ struct dentry *res;
44612+ if (IS_ERR(tmp))
44613+ return ERR_CAST(tmp);
44614+ res = kern_path_create(dfd, tmp, path, is_dir);
44615+ if (IS_ERR(res))
44616+ putname(tmp);
44617+ else
44618+ *to = tmp;
44619+ return res;
44620+}
44621+
44622 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
44623 {
44624 int error = may_create(dir, dentry);
44625@@ -2511,6 +2628,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44626 error = mnt_want_write(path.mnt);
44627 if (error)
44628 goto out_dput;
44629+
44630+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
44631+ error = -EPERM;
44632+ goto out_drop_write;
44633+ }
44634+
44635+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
44636+ error = -EACCES;
44637+ goto out_drop_write;
44638+ }
44639+
44640 error = security_path_mknod(&path, dentry, mode, dev);
44641 if (error)
44642 goto out_drop_write;
44643@@ -2528,6 +2656,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
44644 }
44645 out_drop_write:
44646 mnt_drop_write(path.mnt);
44647+
44648+ if (!error)
44649+ gr_handle_create(dentry, path.mnt);
44650 out_dput:
44651 dput(dentry);
44652 mutex_unlock(&path.dentry->d_inode->i_mutex);
44653@@ -2577,12 +2708,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
44654 error = mnt_want_write(path.mnt);
44655 if (error)
44656 goto out_dput;
44657+
44658+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
44659+ error = -EACCES;
44660+ goto out_drop_write;
44661+ }
44662+
44663 error = security_path_mkdir(&path, dentry, mode);
44664 if (error)
44665 goto out_drop_write;
44666 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
44667 out_drop_write:
44668 mnt_drop_write(path.mnt);
44669+
44670+ if (!error)
44671+ gr_handle_create(dentry, path.mnt);
44672 out_dput:
44673 dput(dentry);
44674 mutex_unlock(&path.dentry->d_inode->i_mutex);
44675@@ -2662,6 +2802,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44676 char * name;
44677 struct dentry *dentry;
44678 struct nameidata nd;
44679+ ino_t saved_ino = 0;
44680+ dev_t saved_dev = 0;
44681
44682 error = user_path_parent(dfd, pathname, &nd, &name);
44683 if (error)
44684@@ -2690,6 +2832,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
44685 error = -ENOENT;
44686 goto exit3;
44687 }
44688+
44689+ saved_ino = dentry->d_inode->i_ino;
44690+ saved_dev = gr_get_dev_from_dentry(dentry);
44691+
44692+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44693+ error = -EACCES;
44694+ goto exit3;
44695+ }
44696+
44697 error = mnt_want_write(nd.path.mnt);
44698 if (error)
44699 goto exit3;
44700@@ -2697,6 +2848,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
44701 if (error)
44702 goto exit4;
44703 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44704+ if (!error && (saved_dev || saved_ino))
44705+ gr_handle_delete(saved_ino, saved_dev);
44706 exit4:
44707 mnt_drop_write(nd.path.mnt);
44708 exit3:
44709@@ -2759,6 +2912,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44710 struct dentry *dentry;
44711 struct nameidata nd;
44712 struct inode *inode = NULL;
44713+ ino_t saved_ino = 0;
44714+ dev_t saved_dev = 0;
44715
44716 error = user_path_parent(dfd, pathname, &nd, &name);
44717 if (error)
44718@@ -2781,6 +2936,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44719 if (!inode)
44720 goto slashes;
44721 ihold(inode);
44722+
44723+ if (inode->i_nlink <= 1) {
44724+ saved_ino = inode->i_ino;
44725+ saved_dev = gr_get_dev_from_dentry(dentry);
44726+ }
44727+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44728+ error = -EACCES;
44729+ goto exit2;
44730+ }
44731+
44732 error = mnt_want_write(nd.path.mnt);
44733 if (error)
44734 goto exit2;
44735@@ -2788,6 +2953,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
44736 if (error)
44737 goto exit3;
44738 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44739+ if (!error && (saved_ino || saved_dev))
44740+ gr_handle_delete(saved_ino, saved_dev);
44741 exit3:
44742 mnt_drop_write(nd.path.mnt);
44743 exit2:
44744@@ -2863,10 +3030,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
44745 error = mnt_want_write(path.mnt);
44746 if (error)
44747 goto out_dput;
44748+
44749+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
44750+ error = -EACCES;
44751+ goto out_drop_write;
44752+ }
44753+
44754 error = security_path_symlink(&path, dentry, from);
44755 if (error)
44756 goto out_drop_write;
44757 error = vfs_symlink(path.dentry->d_inode, dentry, from);
44758+ if (!error)
44759+ gr_handle_create(dentry, path.mnt);
44760 out_drop_write:
44761 mnt_drop_write(path.mnt);
44762 out_dput:
44763@@ -2938,6 +3113,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44764 {
44765 struct dentry *new_dentry;
44766 struct path old_path, new_path;
44767+ char *to = NULL;
44768 int how = 0;
44769 int error;
44770
44771@@ -2961,7 +3137,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44772 if (error)
44773 return error;
44774
44775- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
44776+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
44777 error = PTR_ERR(new_dentry);
44778 if (IS_ERR(new_dentry))
44779 goto out;
44780@@ -2972,13 +3148,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
44781 error = mnt_want_write(new_path.mnt);
44782 if (error)
44783 goto out_dput;
44784+
44785+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44786+ old_path.dentry->d_inode,
44787+ old_path.dentry->d_inode->i_mode, to)) {
44788+ error = -EACCES;
44789+ goto out_drop_write;
44790+ }
44791+
44792+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
44793+ old_path.dentry, old_path.mnt, to)) {
44794+ error = -EACCES;
44795+ goto out_drop_write;
44796+ }
44797+
44798 error = security_path_link(old_path.dentry, &new_path, new_dentry);
44799 if (error)
44800 goto out_drop_write;
44801 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
44802+ if (!error)
44803+ gr_handle_create(new_dentry, new_path.mnt);
44804 out_drop_write:
44805 mnt_drop_write(new_path.mnt);
44806 out_dput:
44807+ putname(to);
44808 dput(new_dentry);
44809 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
44810 path_put(&new_path);
44811@@ -3206,6 +3399,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44812 if (new_dentry == trap)
44813 goto exit5;
44814
44815+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44816+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44817+ to);
44818+ if (error)
44819+ goto exit5;
44820+
44821 error = mnt_want_write(oldnd.path.mnt);
44822 if (error)
44823 goto exit5;
44824@@ -3215,6 +3414,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
44825 goto exit6;
44826 error = vfs_rename(old_dir->d_inode, old_dentry,
44827 new_dir->d_inode, new_dentry);
44828+ if (!error)
44829+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44830+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44831 exit6:
44832 mnt_drop_write(oldnd.path.mnt);
44833 exit5:
44834@@ -3240,6 +3442,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
44835
44836 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44837 {
44838+ char tmpbuf[64];
44839+ const char *newlink;
44840 int len;
44841
44842 len = PTR_ERR(link);
44843@@ -3249,7 +3453,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
44844 len = strlen(link);
44845 if (len > (unsigned) buflen)
44846 len = buflen;
44847- if (copy_to_user(buffer, link, len))
44848+
44849+ if (len < sizeof(tmpbuf)) {
44850+ memcpy(tmpbuf, link, len);
44851+ newlink = tmpbuf;
44852+ } else
44853+ newlink = link;
44854+
44855+ if (copy_to_user(buffer, newlink, len))
44856 len = -EFAULT;
44857 out:
44858 return len;
44859diff --git a/fs/namespace.c b/fs/namespace.c
44860index cfc6d44..b4632a5 100644
44861--- a/fs/namespace.c
44862+++ b/fs/namespace.c
44863@@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44864 if (!(sb->s_flags & MS_RDONLY))
44865 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44866 up_write(&sb->s_umount);
44867+
44868+ gr_log_remount(mnt->mnt_devname, retval);
44869+
44870 return retval;
44871 }
44872
44873@@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
44874 br_write_unlock(vfsmount_lock);
44875 up_write(&namespace_sem);
44876 release_mounts(&umount_list);
44877+
44878+ gr_log_unmount(mnt->mnt_devname, retval);
44879+
44880 return retval;
44881 }
44882
44883@@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44884 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
44885 MS_STRICTATIME);
44886
44887+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44888+ retval = -EPERM;
44889+ goto dput_out;
44890+ }
44891+
44892+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44893+ retval = -EPERM;
44894+ goto dput_out;
44895+ }
44896+
44897 if (flags & MS_REMOUNT)
44898 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44899 data_page);
44900@@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
44901 dev_name, data_page);
44902 dput_out:
44903 path_put(&path);
44904+
44905+ gr_log_mount(dev_name, dir_name, retval);
44906+
44907 return retval;
44908 }
44909
44910@@ -2605,6 +2624,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
44911 if (error)
44912 goto out2;
44913
44914+ if (gr_handle_chroot_pivot()) {
44915+ error = -EPERM;
44916+ goto out2;
44917+ }
44918+
44919 get_fs_root(current->fs, &root);
44920 error = lock_mount(&old);
44921 if (error)
44922diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
44923index 3db6b82..a57597e 100644
44924--- a/fs/nfs/blocklayout/blocklayout.c
44925+++ b/fs/nfs/blocklayout/blocklayout.c
44926@@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
44927 */
44928 struct parallel_io {
44929 struct kref refcnt;
44930- struct rpc_call_ops call_ops;
44931+ rpc_call_ops_no_const call_ops;
44932 void (*pnfs_callback) (void *data);
44933 void *data;
44934 };
44935diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
44936index 50a15fa..ca113f9 100644
44937--- a/fs/nfs/inode.c
44938+++ b/fs/nfs/inode.c
44939@@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
44940 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44941 nfsi->attrtimeo_timestamp = jiffies;
44942
44943- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44944+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44945 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44946 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44947 else
44948@@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
44949 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44950 }
44951
44952-static atomic_long_t nfs_attr_generation_counter;
44953+static atomic_long_unchecked_t nfs_attr_generation_counter;
44954
44955 static unsigned long nfs_read_attr_generation_counter(void)
44956 {
44957- return atomic_long_read(&nfs_attr_generation_counter);
44958+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44959 }
44960
44961 unsigned long nfs_inc_attr_generation_counter(void)
44962 {
44963- return atomic_long_inc_return(&nfs_attr_generation_counter);
44964+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44965 }
44966
44967 void nfs_fattr_init(struct nfs_fattr *fattr)
44968diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
44969index 7a2e442..8e544cc 100644
44970--- a/fs/nfsd/vfs.c
44971+++ b/fs/nfsd/vfs.c
44972@@ -914,7 +914,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44973 } else {
44974 oldfs = get_fs();
44975 set_fs(KERNEL_DS);
44976- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44977+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
44978 set_fs(oldfs);
44979 }
44980
44981@@ -1018,7 +1018,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
44982
44983 /* Write the data. */
44984 oldfs = get_fs(); set_fs(KERNEL_DS);
44985- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44986+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
44987 set_fs(oldfs);
44988 if (host_err < 0)
44989 goto out_nfserr;
44990@@ -1553,7 +1553,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
44991 */
44992
44993 oldfs = get_fs(); set_fs(KERNEL_DS);
44994- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44995+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
44996 set_fs(oldfs);
44997
44998 if (host_err < 0)
44999diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
45000index 9fde1c0..14e8827 100644
45001--- a/fs/notify/fanotify/fanotify_user.c
45002+++ b/fs/notify/fanotify/fanotify_user.c
45003@@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
45004 goto out_close_fd;
45005
45006 ret = -EFAULT;
45007- if (copy_to_user(buf, &fanotify_event_metadata,
45008+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
45009+ copy_to_user(buf, &fanotify_event_metadata,
45010 fanotify_event_metadata.event_len))
45011 goto out_kill_access_response;
45012
45013diff --git a/fs/notify/notification.c b/fs/notify/notification.c
45014index ee18815..7aa5d01 100644
45015--- a/fs/notify/notification.c
45016+++ b/fs/notify/notification.c
45017@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
45018 * get set to 0 so it will never get 'freed'
45019 */
45020 static struct fsnotify_event *q_overflow_event;
45021-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45022+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45023
45024 /**
45025 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
45026@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
45027 */
45028 u32 fsnotify_get_cookie(void)
45029 {
45030- return atomic_inc_return(&fsnotify_sync_cookie);
45031+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
45032 }
45033 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
45034
45035diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
45036index 99e3610..02c1068 100644
45037--- a/fs/ntfs/dir.c
45038+++ b/fs/ntfs/dir.c
45039@@ -1329,7 +1329,7 @@ find_next_index_buffer:
45040 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
45041 ~(s64)(ndir->itype.index.block_size - 1)));
45042 /* Bounds checks. */
45043- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45044+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
45045 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
45046 "inode 0x%lx or driver bug.", vdir->i_ino);
45047 goto err_out;
45048diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
45049index c587e2d..3641eaa 100644
45050--- a/fs/ntfs/file.c
45051+++ b/fs/ntfs/file.c
45052@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
45053 #endif /* NTFS_RW */
45054 };
45055
45056-const struct file_operations ntfs_empty_file_ops = {};
45057+const struct file_operations ntfs_empty_file_ops __read_only;
45058
45059-const struct inode_operations ntfs_empty_inode_ops = {};
45060+const struct inode_operations ntfs_empty_inode_ops __read_only;
45061diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
45062index 210c352..a174f83 100644
45063--- a/fs/ocfs2/localalloc.c
45064+++ b/fs/ocfs2/localalloc.c
45065@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
45066 goto bail;
45067 }
45068
45069- atomic_inc(&osb->alloc_stats.moves);
45070+ atomic_inc_unchecked(&osb->alloc_stats.moves);
45071
45072 bail:
45073 if (handle)
45074diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
45075index d355e6e..578d905 100644
45076--- a/fs/ocfs2/ocfs2.h
45077+++ b/fs/ocfs2/ocfs2.h
45078@@ -235,11 +235,11 @@ enum ocfs2_vol_state
45079
45080 struct ocfs2_alloc_stats
45081 {
45082- atomic_t moves;
45083- atomic_t local_data;
45084- atomic_t bitmap_data;
45085- atomic_t bg_allocs;
45086- atomic_t bg_extends;
45087+ atomic_unchecked_t moves;
45088+ atomic_unchecked_t local_data;
45089+ atomic_unchecked_t bitmap_data;
45090+ atomic_unchecked_t bg_allocs;
45091+ atomic_unchecked_t bg_extends;
45092 };
45093
45094 enum ocfs2_local_alloc_state
45095diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
45096index ba5d97e..c77db25 100644
45097--- a/fs/ocfs2/suballoc.c
45098+++ b/fs/ocfs2/suballoc.c
45099@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
45100 mlog_errno(status);
45101 goto bail;
45102 }
45103- atomic_inc(&osb->alloc_stats.bg_extends);
45104+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
45105
45106 /* You should never ask for this much metadata */
45107 BUG_ON(bits_wanted >
45108@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
45109 mlog_errno(status);
45110 goto bail;
45111 }
45112- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45113+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45114
45115 *suballoc_loc = res.sr_bg_blkno;
45116 *suballoc_bit_start = res.sr_bit_offset;
45117@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
45118 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
45119 res->sr_bits);
45120
45121- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45122+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45123
45124 BUG_ON(res->sr_bits != 1);
45125
45126@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
45127 mlog_errno(status);
45128 goto bail;
45129 }
45130- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45131+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
45132
45133 BUG_ON(res.sr_bits != 1);
45134
45135@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45136 cluster_start,
45137 num_clusters);
45138 if (!status)
45139- atomic_inc(&osb->alloc_stats.local_data);
45140+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
45141 } else {
45142 if (min_clusters > (osb->bitmap_cpg - 1)) {
45143 /* The only paths asking for contiguousness
45144@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
45145 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
45146 res.sr_bg_blkno,
45147 res.sr_bit_offset);
45148- atomic_inc(&osb->alloc_stats.bitmap_data);
45149+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
45150 *num_clusters = res.sr_bits;
45151 }
45152 }
45153diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
45154index 4994f8b..eaab8eb 100644
45155--- a/fs/ocfs2/super.c
45156+++ b/fs/ocfs2/super.c
45157@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
45158 "%10s => GlobalAllocs: %d LocalAllocs: %d "
45159 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
45160 "Stats",
45161- atomic_read(&osb->alloc_stats.bitmap_data),
45162- atomic_read(&osb->alloc_stats.local_data),
45163- atomic_read(&osb->alloc_stats.bg_allocs),
45164- atomic_read(&osb->alloc_stats.moves),
45165- atomic_read(&osb->alloc_stats.bg_extends));
45166+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
45167+ atomic_read_unchecked(&osb->alloc_stats.local_data),
45168+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
45169+ atomic_read_unchecked(&osb->alloc_stats.moves),
45170+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
45171
45172 out += snprintf(buf + out, len - out,
45173 "%10s => State: %u Descriptor: %llu Size: %u bits "
45174@@ -2119,11 +2119,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
45175 spin_lock_init(&osb->osb_xattr_lock);
45176 ocfs2_init_steal_slots(osb);
45177
45178- atomic_set(&osb->alloc_stats.moves, 0);
45179- atomic_set(&osb->alloc_stats.local_data, 0);
45180- atomic_set(&osb->alloc_stats.bitmap_data, 0);
45181- atomic_set(&osb->alloc_stats.bg_allocs, 0);
45182- atomic_set(&osb->alloc_stats.bg_extends, 0);
45183+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45184+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45185+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45186+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45187+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45188
45189 /* Copy the blockcheck stats from the superblock probe */
45190 osb->osb_ecc_stats = *stats;
45191diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
45192index 5d22872..523db20 100644
45193--- a/fs/ocfs2/symlink.c
45194+++ b/fs/ocfs2/symlink.c
45195@@ -142,7 +142,7 @@ bail:
45196
45197 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
45198 {
45199- char *link = nd_get_link(nd);
45200+ const char *link = nd_get_link(nd);
45201 if (!IS_ERR(link))
45202 kfree(link);
45203 }
45204diff --git a/fs/open.c b/fs/open.c
45205index 22c41b5..695cb17 100644
45206--- a/fs/open.c
45207+++ b/fs/open.c
45208@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
45209 error = locks_verify_truncate(inode, NULL, length);
45210 if (!error)
45211 error = security_path_truncate(&path);
45212+
45213+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45214+ error = -EACCES;
45215+
45216 if (!error)
45217 error = do_truncate(path.dentry, length, 0, NULL);
45218
45219@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
45220 if (__mnt_is_readonly(path.mnt))
45221 res = -EROFS;
45222
45223+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45224+ res = -EACCES;
45225+
45226 out_path_release:
45227 path_put(&path);
45228 out:
45229@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
45230 if (error)
45231 goto dput_and_out;
45232
45233+ gr_log_chdir(path.dentry, path.mnt);
45234+
45235 set_fs_pwd(current->fs, &path);
45236
45237 dput_and_out:
45238@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
45239 goto out_putf;
45240
45241 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
45242+
45243+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45244+ error = -EPERM;
45245+
45246+ if (!error)
45247+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45248+
45249 if (!error)
45250 set_fs_pwd(current->fs, &file->f_path);
45251 out_putf:
45252@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
45253 if (error)
45254 goto dput_and_out;
45255
45256+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45257+ goto dput_and_out;
45258+
45259 set_fs_root(current->fs, &path);
45260+
45261+ gr_handle_chroot_chdir(&path);
45262+
45263 error = 0;
45264 dput_and_out:
45265 path_put(&path);
45266@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
45267 if (error)
45268 return error;
45269 mutex_lock(&inode->i_mutex);
45270+
45271+ if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
45272+ error = -EACCES;
45273+ goto out_unlock;
45274+ }
45275+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
45276+ error = -EACCES;
45277+ goto out_unlock;
45278+ }
45279+
45280 error = security_path_chmod(path->dentry, path->mnt, mode);
45281 if (error)
45282 goto out_unlock;
45283@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
45284 int error;
45285 struct iattr newattrs;
45286
45287+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
45288+ return -EACCES;
45289+
45290 newattrs.ia_valid = ATTR_CTIME;
45291 if (user != (uid_t) -1) {
45292 newattrs.ia_valid |= ATTR_UID;
45293diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
45294index 6296b40..417c00f 100644
45295--- a/fs/partitions/efi.c
45296+++ b/fs/partitions/efi.c
45297@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
45298 if (!gpt)
45299 return NULL;
45300
45301+ if (!le32_to_cpu(gpt->num_partition_entries))
45302+ return NULL;
45303+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
45304+ if (!pte)
45305+ return NULL;
45306+
45307 count = le32_to_cpu(gpt->num_partition_entries) *
45308 le32_to_cpu(gpt->sizeof_partition_entry);
45309- if (!count)
45310- return NULL;
45311- pte = kzalloc(count, GFP_KERNEL);
45312- if (!pte)
45313- return NULL;
45314-
45315 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
45316 (u8 *) pte,
45317 count) < count) {
45318diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
45319index bd8ae78..539d250 100644
45320--- a/fs/partitions/ldm.c
45321+++ b/fs/partitions/ldm.c
45322@@ -1324,7 +1324,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
45323 goto found;
45324 }
45325
45326- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45327+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45328 if (!f) {
45329 ldm_crit ("Out of memory.");
45330 return false;
45331diff --git a/fs/pipe.c b/fs/pipe.c
45332index 4065f07..68c0706 100644
45333--- a/fs/pipe.c
45334+++ b/fs/pipe.c
45335@@ -420,9 +420,9 @@ redo:
45336 }
45337 if (bufs) /* More to do? */
45338 continue;
45339- if (!pipe->writers)
45340+ if (!atomic_read(&pipe->writers))
45341 break;
45342- if (!pipe->waiting_writers) {
45343+ if (!atomic_read(&pipe->waiting_writers)) {
45344 /* syscall merging: Usually we must not sleep
45345 * if O_NONBLOCK is set, or if we got some data.
45346 * But if a writer sleeps in kernel space, then
45347@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
45348 mutex_lock(&inode->i_mutex);
45349 pipe = inode->i_pipe;
45350
45351- if (!pipe->readers) {
45352+ if (!atomic_read(&pipe->readers)) {
45353 send_sig(SIGPIPE, current, 0);
45354 ret = -EPIPE;
45355 goto out;
45356@@ -530,7 +530,7 @@ redo1:
45357 for (;;) {
45358 int bufs;
45359
45360- if (!pipe->readers) {
45361+ if (!atomic_read(&pipe->readers)) {
45362 send_sig(SIGPIPE, current, 0);
45363 if (!ret)
45364 ret = -EPIPE;
45365@@ -616,9 +616,9 @@ redo2:
45366 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45367 do_wakeup = 0;
45368 }
45369- pipe->waiting_writers++;
45370+ atomic_inc(&pipe->waiting_writers);
45371 pipe_wait(pipe);
45372- pipe->waiting_writers--;
45373+ atomic_dec(&pipe->waiting_writers);
45374 }
45375 out:
45376 mutex_unlock(&inode->i_mutex);
45377@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45378 mask = 0;
45379 if (filp->f_mode & FMODE_READ) {
45380 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45381- if (!pipe->writers && filp->f_version != pipe->w_counter)
45382+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45383 mask |= POLLHUP;
45384 }
45385
45386@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
45387 * Most Unices do not set POLLERR for FIFOs but on Linux they
45388 * behave exactly like pipes for poll().
45389 */
45390- if (!pipe->readers)
45391+ if (!atomic_read(&pipe->readers))
45392 mask |= POLLERR;
45393 }
45394
45395@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
45396
45397 mutex_lock(&inode->i_mutex);
45398 pipe = inode->i_pipe;
45399- pipe->readers -= decr;
45400- pipe->writers -= decw;
45401+ atomic_sub(decr, &pipe->readers);
45402+ atomic_sub(decw, &pipe->writers);
45403
45404- if (!pipe->readers && !pipe->writers) {
45405+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45406 free_pipe_info(inode);
45407 } else {
45408 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
45409@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
45410
45411 if (inode->i_pipe) {
45412 ret = 0;
45413- inode->i_pipe->readers++;
45414+ atomic_inc(&inode->i_pipe->readers);
45415 }
45416
45417 mutex_unlock(&inode->i_mutex);
45418@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
45419
45420 if (inode->i_pipe) {
45421 ret = 0;
45422- inode->i_pipe->writers++;
45423+ atomic_inc(&inode->i_pipe->writers);
45424 }
45425
45426 mutex_unlock(&inode->i_mutex);
45427@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
45428 if (inode->i_pipe) {
45429 ret = 0;
45430 if (filp->f_mode & FMODE_READ)
45431- inode->i_pipe->readers++;
45432+ atomic_inc(&inode->i_pipe->readers);
45433 if (filp->f_mode & FMODE_WRITE)
45434- inode->i_pipe->writers++;
45435+ atomic_inc(&inode->i_pipe->writers);
45436 }
45437
45438 mutex_unlock(&inode->i_mutex);
45439@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
45440 inode->i_pipe = NULL;
45441 }
45442
45443-static struct vfsmount *pipe_mnt __read_mostly;
45444+struct vfsmount *pipe_mnt __read_mostly;
45445
45446 /*
45447 * pipefs_dname() is called from d_path().
45448@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
45449 goto fail_iput;
45450 inode->i_pipe = pipe;
45451
45452- pipe->readers = pipe->writers = 1;
45453+ atomic_set(&pipe->readers, 1);
45454+ atomic_set(&pipe->writers, 1);
45455 inode->i_fop = &rdwr_pipefifo_fops;
45456
45457 /*
45458diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
45459index 15af622..0e9f4467 100644
45460--- a/fs/proc/Kconfig
45461+++ b/fs/proc/Kconfig
45462@@ -30,12 +30,12 @@ config PROC_FS
45463
45464 config PROC_KCORE
45465 bool "/proc/kcore support" if !ARM
45466- depends on PROC_FS && MMU
45467+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45468
45469 config PROC_VMCORE
45470 bool "/proc/vmcore support"
45471- depends on PROC_FS && CRASH_DUMP
45472- default y
45473+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45474+ default n
45475 help
45476 Exports the dump image of crashed kernel in ELF format.
45477
45478@@ -59,8 +59,8 @@ config PROC_SYSCTL
45479 limited in memory.
45480
45481 config PROC_PAGE_MONITOR
45482- default y
45483- depends on PROC_FS && MMU
45484+ default n
45485+ depends on PROC_FS && MMU && !GRKERNSEC
45486 bool "Enable /proc page monitoring" if EXPERT
45487 help
45488 Various /proc files exist to monitor process memory utilization:
45489diff --git a/fs/proc/array.c b/fs/proc/array.c
45490index 3a1dafd..d41fc37 100644
45491--- a/fs/proc/array.c
45492+++ b/fs/proc/array.c
45493@@ -60,6 +60,7 @@
45494 #include <linux/tty.h>
45495 #include <linux/string.h>
45496 #include <linux/mman.h>
45497+#include <linux/grsecurity.h>
45498 #include <linux/proc_fs.h>
45499 #include <linux/ioport.h>
45500 #include <linux/uaccess.h>
45501@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
45502 seq_putc(m, '\n');
45503 }
45504
45505+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45506+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45507+{
45508+ if (p->mm)
45509+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45510+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45511+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45512+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45513+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45514+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45515+ else
45516+ seq_printf(m, "PaX:\t-----\n");
45517+}
45518+#endif
45519+
45520 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45521 struct pid *pid, struct task_struct *task)
45522 {
45523@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45524 task_cpus_allowed(m, task);
45525 cpuset_task_status_allowed(m, task);
45526 task_context_switch_counts(m, task);
45527+
45528+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45529+ task_pax(m, task);
45530+#endif
45531+
45532+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45533+ task_grsec_rbac(m, task);
45534+#endif
45535+
45536 return 0;
45537 }
45538
45539+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45540+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45541+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45542+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45543+#endif
45544+
45545 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45546 struct pid *pid, struct task_struct *task, int whole)
45547 {
45548@@ -449,6 +480,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45549 gtime = task->gtime;
45550 }
45551
45552+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45553+ if (PAX_RAND_FLAGS(mm)) {
45554+ eip = 0;
45555+ esp = 0;
45556+ wchan = 0;
45557+ }
45558+#endif
45559+#ifdef CONFIG_GRKERNSEC_HIDESYM
45560+ wchan = 0;
45561+ eip =0;
45562+ esp =0;
45563+#endif
45564+
45565 /* scale priority and nice values from timeslices to -20..20 */
45566 /* to make it look like a "normal" Unix priority/nice value */
45567 priority = task_prio(task);
45568@@ -489,9 +533,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45569 vsize,
45570 mm ? get_mm_rss(mm) : 0,
45571 rsslim,
45572+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45573+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45574+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45575+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45576+#else
45577 mm ? (permitted ? mm->start_code : 1) : 0,
45578 mm ? (permitted ? mm->end_code : 1) : 0,
45579 (permitted && mm) ? mm->start_stack : 0,
45580+#endif
45581 esp,
45582 eip,
45583 /* The signal information here is obsolete.
45584@@ -544,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45585
45586 return 0;
45587 }
45588+
45589+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45590+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45591+{
45592+ u32 curr_ip = 0;
45593+ unsigned long flags;
45594+
45595+ if (lock_task_sighand(task, &flags)) {
45596+ curr_ip = task->signal->curr_ip;
45597+ unlock_task_sighand(task, &flags);
45598+ }
45599+
45600+ return sprintf(buffer, "%pI4\n", &curr_ip);
45601+}
45602+#endif
45603diff --git a/fs/proc/base.c b/fs/proc/base.c
45604index 1fc1dca..813fd0b 100644
45605--- a/fs/proc/base.c
45606+++ b/fs/proc/base.c
45607@@ -107,6 +107,22 @@ struct pid_entry {
45608 union proc_op op;
45609 };
45610
45611+struct getdents_callback {
45612+ struct linux_dirent __user * current_dir;
45613+ struct linux_dirent __user * previous;
45614+ struct file * file;
45615+ int count;
45616+ int error;
45617+};
45618+
45619+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45620+ loff_t offset, u64 ino, unsigned int d_type)
45621+{
45622+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45623+ buf->error = -EINVAL;
45624+ return 0;
45625+}
45626+
45627 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45628 .name = (NAME), \
45629 .len = sizeof(NAME) - 1, \
45630@@ -204,10 +220,12 @@ static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
45631 return ERR_PTR(err);
45632
45633 mm = get_task_mm(task);
45634- if (mm && mm != current->mm &&
45635- !ptrace_may_access(task, mode)) {
45636- mmput(mm);
45637- mm = ERR_PTR(-EACCES);
45638+ if (mm) {
45639+ if ((mm != current->mm && !ptrace_may_access(task, mode)) ||
45640+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task)))) {
45641+ mmput(mm);
45642+ mm = ERR_PTR(-EACCES);
45643+ }
45644 }
45645 mutex_unlock(&task->signal->cred_guard_mutex);
45646
45647@@ -229,6 +247,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
45648 if (!mm->arg_end)
45649 goto out_mm; /* Shh! No looking before we're done */
45650
45651+ if (gr_acl_handle_procpidmem(task))
45652+ goto out_mm;
45653+
45654 len = mm->arg_end - mm->arg_start;
45655
45656 if (len > PAGE_SIZE)
45657@@ -256,12 +277,28 @@ out:
45658 return res;
45659 }
45660
45661+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45662+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45663+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45664+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45665+#endif
45666+
45667 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45668 {
45669 struct mm_struct *mm = mm_for_maps(task);
45670 int res = PTR_ERR(mm);
45671 if (mm && !IS_ERR(mm)) {
45672 unsigned int nwords = 0;
45673+
45674+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45675+ /* allow if we're currently ptracing this task */
45676+ if (PAX_RAND_FLAGS(mm) &&
45677+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45678+ mmput(mm);
45679+ return 0;
45680+ }
45681+#endif
45682+
45683 do {
45684 nwords += 2;
45685 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45686@@ -275,7 +312,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
45687 }
45688
45689
45690-#ifdef CONFIG_KALLSYMS
45691+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45692 /*
45693 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45694 * Returns the resolved symbol. If that fails, simply return the address.
45695@@ -314,7 +351,7 @@ static void unlock_trace(struct task_struct *task)
45696 mutex_unlock(&task->signal->cred_guard_mutex);
45697 }
45698
45699-#ifdef CONFIG_STACKTRACE
45700+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45701
45702 #define MAX_STACK_TRACE_DEPTH 64
45703
45704@@ -505,7 +542,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
45705 return count;
45706 }
45707
45708-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45709+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45710 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45711 {
45712 long nr;
45713@@ -534,7 +571,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
45714 /************************************************************************/
45715
45716 /* permission checks */
45717-static int proc_fd_access_allowed(struct inode *inode)
45718+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45719 {
45720 struct task_struct *task;
45721 int allowed = 0;
45722@@ -544,7 +581,10 @@ static int proc_fd_access_allowed(struct inode *inode)
45723 */
45724 task = get_proc_task(inode);
45725 if (task) {
45726- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45727+ if (log)
45728+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45729+ else
45730+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45731 put_task_struct(task);
45732 }
45733 return allowed;
45734@@ -826,6 +866,10 @@ static ssize_t mem_read(struct file * file, char __user * buf,
45735 return ret;
45736 }
45737
45738+#define mem_write NULL
45739+
45740+#ifndef mem_write
45741+/* They were right the first time */
45742 static ssize_t mem_write(struct file * file, const char __user *buf,
45743 size_t count, loff_t *ppos)
45744 {
45745@@ -866,6 +910,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
45746 free_page((unsigned long) page);
45747 return copied;
45748 }
45749+#endif
45750
45751 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
45752 {
45753@@ -911,6 +956,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
45754 if (!task)
45755 goto out_no_task;
45756
45757+ if (gr_acl_handle_procpidmem(task))
45758+ goto out;
45759+
45760 ret = -ENOMEM;
45761 page = (char *)__get_free_page(GFP_TEMPORARY);
45762 if (!page)
45763@@ -1533,7 +1581,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
45764 path_put(&nd->path);
45765
45766 /* Are we allowed to snoop on the tasks file descriptors? */
45767- if (!proc_fd_access_allowed(inode))
45768+ if (!proc_fd_access_allowed(inode,0))
45769 goto out;
45770
45771 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45772@@ -1572,8 +1620,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
45773 struct path path;
45774
45775 /* Are we allowed to snoop on the tasks file descriptors? */
45776- if (!proc_fd_access_allowed(inode))
45777- goto out;
45778+ /* logging this is needed for learning on chromium to work properly,
45779+ but we don't want to flood the logs from 'ps' which does a readlink
45780+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45781+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45782+ */
45783+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45784+ if (!proc_fd_access_allowed(inode,0))
45785+ goto out;
45786+ } else {
45787+ if (!proc_fd_access_allowed(inode,1))
45788+ goto out;
45789+ }
45790
45791 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45792 if (error)
45793@@ -1638,7 +1696,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
45794 rcu_read_lock();
45795 cred = __task_cred(task);
45796 inode->i_uid = cred->euid;
45797+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45798+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45799+#else
45800 inode->i_gid = cred->egid;
45801+#endif
45802 rcu_read_unlock();
45803 }
45804 security_task_to_inode(task, inode);
45805@@ -1656,6 +1718,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45806 struct inode *inode = dentry->d_inode;
45807 struct task_struct *task;
45808 const struct cred *cred;
45809+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45810+ const struct cred *tmpcred = current_cred();
45811+#endif
45812
45813 generic_fillattr(inode, stat);
45814
45815@@ -1663,13 +1728,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
45816 stat->uid = 0;
45817 stat->gid = 0;
45818 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45819+
45820+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45821+ rcu_read_unlock();
45822+ return -ENOENT;
45823+ }
45824+
45825 if (task) {
45826+ cred = __task_cred(task);
45827+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45828+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45829+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45830+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45831+#endif
45832+ ) {
45833+#endif
45834 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45835+#ifdef CONFIG_GRKERNSEC_PROC_USER
45836+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45837+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45838+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45839+#endif
45840 task_dumpable(task)) {
45841- cred = __task_cred(task);
45842 stat->uid = cred->euid;
45843+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45844+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45845+#else
45846 stat->gid = cred->egid;
45847+#endif
45848 }
45849+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45850+ } else {
45851+ rcu_read_unlock();
45852+ return -ENOENT;
45853+ }
45854+#endif
45855 }
45856 rcu_read_unlock();
45857 return 0;
45858@@ -1706,11 +1799,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
45859
45860 if (task) {
45861 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45862+#ifdef CONFIG_GRKERNSEC_PROC_USER
45863+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45864+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45865+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45866+#endif
45867 task_dumpable(task)) {
45868 rcu_read_lock();
45869 cred = __task_cred(task);
45870 inode->i_uid = cred->euid;
45871+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45872+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45873+#else
45874 inode->i_gid = cred->egid;
45875+#endif
45876 rcu_read_unlock();
45877 } else {
45878 inode->i_uid = 0;
45879@@ -1828,7 +1930,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
45880 int fd = proc_fd(inode);
45881
45882 if (task) {
45883- files = get_files_struct(task);
45884+ if (!gr_acl_handle_procpidmem(task))
45885+ files = get_files_struct(task);
45886 put_task_struct(task);
45887 }
45888 if (files) {
45889@@ -2096,11 +2199,21 @@ static const struct file_operations proc_fd_operations = {
45890 */
45891 static int proc_fd_permission(struct inode *inode, int mask)
45892 {
45893+ struct task_struct *task;
45894 int rv = generic_permission(inode, mask);
45895- if (rv == 0)
45896- return 0;
45897+
45898 if (task_pid(current) == proc_pid(inode))
45899 rv = 0;
45900+
45901+ task = get_proc_task(inode);
45902+ if (task == NULL)
45903+ return rv;
45904+
45905+ if (gr_acl_handle_procpidmem(task))
45906+ rv = -EACCES;
45907+
45908+ put_task_struct(task);
45909+
45910 return rv;
45911 }
45912
45913@@ -2210,6 +2323,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
45914 if (!task)
45915 goto out_no_task;
45916
45917+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45918+ goto out;
45919+
45920 /*
45921 * Yes, it does not scale. And it should not. Don't add
45922 * new entries into /proc/<tgid>/ without very good reasons.
45923@@ -2254,6 +2370,9 @@ static int proc_pident_readdir(struct file *filp,
45924 if (!task)
45925 goto out_no_task;
45926
45927+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45928+ goto out;
45929+
45930 ret = 0;
45931 i = filp->f_pos;
45932 switch (i) {
45933@@ -2524,7 +2643,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
45934 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45935 void *cookie)
45936 {
45937- char *s = nd_get_link(nd);
45938+ const char *s = nd_get_link(nd);
45939 if (!IS_ERR(s))
45940 __putname(s);
45941 }
45942@@ -2722,7 +2841,7 @@ static const struct pid_entry tgid_base_stuff[] = {
45943 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
45944 #endif
45945 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
45946-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45947+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45948 INF("syscall", S_IRUGO, proc_pid_syscall),
45949 #endif
45950 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45951@@ -2747,10 +2866,10 @@ static const struct pid_entry tgid_base_stuff[] = {
45952 #ifdef CONFIG_SECURITY
45953 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45954 #endif
45955-#ifdef CONFIG_KALLSYMS
45956+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45957 INF("wchan", S_IRUGO, proc_pid_wchan),
45958 #endif
45959-#ifdef CONFIG_STACKTRACE
45960+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45961 ONE("stack", S_IRUGO, proc_pid_stack),
45962 #endif
45963 #ifdef CONFIG_SCHEDSTATS
45964@@ -2784,6 +2903,9 @@ static const struct pid_entry tgid_base_stuff[] = {
45965 #ifdef CONFIG_HARDWALL
45966 INF("hardwall", S_IRUGO, proc_pid_hardwall),
45967 #endif
45968+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45969+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45970+#endif
45971 };
45972
45973 static int proc_tgid_base_readdir(struct file * filp,
45974@@ -2909,7 +3031,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
45975 if (!inode)
45976 goto out;
45977
45978+#ifdef CONFIG_GRKERNSEC_PROC_USER
45979+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45980+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45981+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45982+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45983+#else
45984 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45985+#endif
45986 inode->i_op = &proc_tgid_base_inode_operations;
45987 inode->i_fop = &proc_tgid_base_operations;
45988 inode->i_flags|=S_IMMUTABLE;
45989@@ -2951,7 +3080,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
45990 if (!task)
45991 goto out;
45992
45993+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45994+ goto out_put_task;
45995+
45996 result = proc_pid_instantiate(dir, dentry, task, NULL);
45997+out_put_task:
45998 put_task_struct(task);
45999 out:
46000 return result;
46001@@ -3016,6 +3149,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46002 {
46003 unsigned int nr;
46004 struct task_struct *reaper;
46005+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46006+ const struct cred *tmpcred = current_cred();
46007+ const struct cred *itercred;
46008+#endif
46009+ filldir_t __filldir = filldir;
46010 struct tgid_iter iter;
46011 struct pid_namespace *ns;
46012
46013@@ -3039,8 +3177,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
46014 for (iter = next_tgid(ns, iter);
46015 iter.task;
46016 iter.tgid += 1, iter = next_tgid(ns, iter)) {
46017+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46018+ rcu_read_lock();
46019+ itercred = __task_cred(iter.task);
46020+#endif
46021+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
46022+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46023+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
46024+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46025+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
46026+#endif
46027+ )
46028+#endif
46029+ )
46030+ __filldir = &gr_fake_filldir;
46031+ else
46032+ __filldir = filldir;
46033+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46034+ rcu_read_unlock();
46035+#endif
46036 filp->f_pos = iter.tgid + TGID_OFFSET;
46037- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
46038+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
46039 put_task_struct(iter.task);
46040 goto out;
46041 }
46042@@ -3068,7 +3225,7 @@ static const struct pid_entry tid_base_stuff[] = {
46043 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
46044 #endif
46045 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
46046-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
46047+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
46048 INF("syscall", S_IRUGO, proc_pid_syscall),
46049 #endif
46050 INF("cmdline", S_IRUGO, proc_pid_cmdline),
46051@@ -3092,10 +3249,10 @@ static const struct pid_entry tid_base_stuff[] = {
46052 #ifdef CONFIG_SECURITY
46053 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
46054 #endif
46055-#ifdef CONFIG_KALLSYMS
46056+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46057 INF("wchan", S_IRUGO, proc_pid_wchan),
46058 #endif
46059-#ifdef CONFIG_STACKTRACE
46060+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
46061 ONE("stack", S_IRUGO, proc_pid_stack),
46062 #endif
46063 #ifdef CONFIG_SCHEDSTATS
46064diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
46065index 82676e3..5f8518a 100644
46066--- a/fs/proc/cmdline.c
46067+++ b/fs/proc/cmdline.c
46068@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
46069
46070 static int __init proc_cmdline_init(void)
46071 {
46072+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46073+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
46074+#else
46075 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
46076+#endif
46077 return 0;
46078 }
46079 module_init(proc_cmdline_init);
46080diff --git a/fs/proc/devices.c b/fs/proc/devices.c
46081index b143471..bb105e5 100644
46082--- a/fs/proc/devices.c
46083+++ b/fs/proc/devices.c
46084@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
46085
46086 static int __init proc_devices_init(void)
46087 {
46088+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46089+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
46090+#else
46091 proc_create("devices", 0, NULL, &proc_devinfo_operations);
46092+#endif
46093 return 0;
46094 }
46095 module_init(proc_devices_init);
46096diff --git a/fs/proc/inode.c b/fs/proc/inode.c
46097index 7737c54..7172574 100644
46098--- a/fs/proc/inode.c
46099+++ b/fs/proc/inode.c
46100@@ -18,12 +18,18 @@
46101 #include <linux/module.h>
46102 #include <linux/sysctl.h>
46103 #include <linux/slab.h>
46104+#include <linux/grsecurity.h>
46105
46106 #include <asm/system.h>
46107 #include <asm/uaccess.h>
46108
46109 #include "internal.h"
46110
46111+#ifdef CONFIG_PROC_SYSCTL
46112+extern const struct inode_operations proc_sys_inode_operations;
46113+extern const struct inode_operations proc_sys_dir_operations;
46114+#endif
46115+
46116 static void proc_evict_inode(struct inode *inode)
46117 {
46118 struct proc_dir_entry *de;
46119@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
46120 ns_ops = PROC_I(inode)->ns_ops;
46121 if (ns_ops && ns_ops->put)
46122 ns_ops->put(PROC_I(inode)->ns);
46123+
46124+#ifdef CONFIG_PROC_SYSCTL
46125+ if (inode->i_op == &proc_sys_inode_operations ||
46126+ inode->i_op == &proc_sys_dir_operations)
46127+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
46128+#endif
46129+
46130 }
46131
46132 static struct kmem_cache * proc_inode_cachep;
46133@@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
46134 if (de->mode) {
46135 inode->i_mode = de->mode;
46136 inode->i_uid = de->uid;
46137+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
46138+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
46139+#else
46140 inode->i_gid = de->gid;
46141+#endif
46142 }
46143 if (de->size)
46144 inode->i_size = de->size;
46145diff --git a/fs/proc/internal.h b/fs/proc/internal.h
46146index 7838e5c..ff92cbc 100644
46147--- a/fs/proc/internal.h
46148+++ b/fs/proc/internal.h
46149@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
46150 struct pid *pid, struct task_struct *task);
46151 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
46152 struct pid *pid, struct task_struct *task);
46153+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
46154+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
46155+#endif
46156 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
46157
46158 extern const struct file_operations proc_maps_operations;
46159diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
46160index d245cb2..f4e8498 100644
46161--- a/fs/proc/kcore.c
46162+++ b/fs/proc/kcore.c
46163@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46164 * the addresses in the elf_phdr on our list.
46165 */
46166 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46167- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46168+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46169+ if (tsz > buflen)
46170 tsz = buflen;
46171-
46172+
46173 while (buflen) {
46174 struct kcore_list *m;
46175
46176@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46177 kfree(elf_buf);
46178 } else {
46179 if (kern_addr_valid(start)) {
46180- unsigned long n;
46181+ char *elf_buf;
46182+ mm_segment_t oldfs;
46183
46184- n = copy_to_user(buffer, (char *)start, tsz);
46185- /*
46186- * We cannot distingush between fault on source
46187- * and fault on destination. When this happens
46188- * we clear too and hope it will trigger the
46189- * EFAULT again.
46190- */
46191- if (n) {
46192- if (clear_user(buffer + tsz - n,
46193- n))
46194+ elf_buf = kmalloc(tsz, GFP_KERNEL);
46195+ if (!elf_buf)
46196+ return -ENOMEM;
46197+ oldfs = get_fs();
46198+ set_fs(KERNEL_DS);
46199+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46200+ set_fs(oldfs);
46201+ if (copy_to_user(buffer, elf_buf, tsz)) {
46202+ kfree(elf_buf);
46203 return -EFAULT;
46204+ }
46205 }
46206+ set_fs(oldfs);
46207+ kfree(elf_buf);
46208 } else {
46209 if (clear_user(buffer, tsz))
46210 return -EFAULT;
46211@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
46212
46213 static int open_kcore(struct inode *inode, struct file *filp)
46214 {
46215+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46216+ return -EPERM;
46217+#endif
46218 if (!capable(CAP_SYS_RAWIO))
46219 return -EPERM;
46220 if (kcore_need_update)
46221diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
46222index 80e4645..53e5fcf 100644
46223--- a/fs/proc/meminfo.c
46224+++ b/fs/proc/meminfo.c
46225@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
46226 vmi.used >> 10,
46227 vmi.largest_chunk >> 10
46228 #ifdef CONFIG_MEMORY_FAILURE
46229- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46230+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46231 #endif
46232 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
46233 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
46234diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
46235index b1822dd..df622cb 100644
46236--- a/fs/proc/nommu.c
46237+++ b/fs/proc/nommu.c
46238@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
46239 if (len < 1)
46240 len = 1;
46241 seq_printf(m, "%*c", len, ' ');
46242- seq_path(m, &file->f_path, "");
46243+ seq_path(m, &file->f_path, "\n\\");
46244 }
46245
46246 seq_putc(m, '\n');
46247diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
46248index f738024..876984a 100644
46249--- a/fs/proc/proc_net.c
46250+++ b/fs/proc/proc_net.c
46251@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
46252 struct task_struct *task;
46253 struct nsproxy *ns;
46254 struct net *net = NULL;
46255+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46256+ const struct cred *cred = current_cred();
46257+#endif
46258+
46259+#ifdef CONFIG_GRKERNSEC_PROC_USER
46260+ if (cred->fsuid)
46261+ return net;
46262+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46263+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46264+ return net;
46265+#endif
46266
46267 rcu_read_lock();
46268 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46269diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
46270index a6b6217..1e0579d 100644
46271--- a/fs/proc/proc_sysctl.c
46272+++ b/fs/proc/proc_sysctl.c
46273@@ -9,11 +9,13 @@
46274 #include <linux/namei.h>
46275 #include "internal.h"
46276
46277+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46278+
46279 static const struct dentry_operations proc_sys_dentry_operations;
46280 static const struct file_operations proc_sys_file_operations;
46281-static const struct inode_operations proc_sys_inode_operations;
46282+const struct inode_operations proc_sys_inode_operations;
46283 static const struct file_operations proc_sys_dir_file_operations;
46284-static const struct inode_operations proc_sys_dir_operations;
46285+const struct inode_operations proc_sys_dir_operations;
46286
46287 void proc_sys_poll_notify(struct ctl_table_poll *poll)
46288 {
46289@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
46290
46291 err = NULL;
46292 d_set_d_op(dentry, &proc_sys_dentry_operations);
46293+
46294+ gr_handle_proc_create(dentry, inode);
46295+
46296 d_add(dentry, inode);
46297
46298+ if (gr_handle_sysctl(p, MAY_EXEC))
46299+ err = ERR_PTR(-ENOENT);
46300+
46301 out:
46302 sysctl_head_finish(head);
46303 return err;
46304@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
46305 if (!table->proc_handler)
46306 goto out;
46307
46308+#ifdef CONFIG_GRKERNSEC
46309+ error = -EPERM;
46310+ if (write && !capable(CAP_SYS_ADMIN))
46311+ goto out;
46312+#endif
46313+
46314 /* careful: calling conventions are nasty here */
46315 res = count;
46316 error = table->proc_handler(table, write, buf, &res, ppos);
46317@@ -245,6 +259,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
46318 return -ENOMEM;
46319 } else {
46320 d_set_d_op(child, &proc_sys_dentry_operations);
46321+
46322+ gr_handle_proc_create(child, inode);
46323+
46324 d_add(child, inode);
46325 }
46326 } else {
46327@@ -273,6 +290,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
46328 if (*pos < file->f_pos)
46329 continue;
46330
46331+ if (gr_handle_sysctl(table, 0))
46332+ continue;
46333+
46334 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46335 if (res)
46336 return res;
46337@@ -398,6 +418,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
46338 if (IS_ERR(head))
46339 return PTR_ERR(head);
46340
46341+ if (table && gr_handle_sysctl(table, MAY_EXEC))
46342+ return -ENOENT;
46343+
46344 generic_fillattr(inode, stat);
46345 if (table)
46346 stat->mode = (stat->mode & S_IFMT) | table->mode;
46347@@ -420,13 +443,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
46348 .llseek = generic_file_llseek,
46349 };
46350
46351-static const struct inode_operations proc_sys_inode_operations = {
46352+const struct inode_operations proc_sys_inode_operations = {
46353 .permission = proc_sys_permission,
46354 .setattr = proc_sys_setattr,
46355 .getattr = proc_sys_getattr,
46356 };
46357
46358-static const struct inode_operations proc_sys_dir_operations = {
46359+const struct inode_operations proc_sys_dir_operations = {
46360 .lookup = proc_sys_lookup,
46361 .permission = proc_sys_permission,
46362 .setattr = proc_sys_setattr,
46363diff --git a/fs/proc/root.c b/fs/proc/root.c
46364index 03102d9..4ae347e 100644
46365--- a/fs/proc/root.c
46366+++ b/fs/proc/root.c
46367@@ -121,7 +121,15 @@ void __init proc_root_init(void)
46368 #ifdef CONFIG_PROC_DEVICETREE
46369 proc_device_tree_init();
46370 #endif
46371+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46372+#ifdef CONFIG_GRKERNSEC_PROC_USER
46373+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46374+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46375+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46376+#endif
46377+#else
46378 proc_mkdir("bus", NULL);
46379+#endif
46380 proc_sys_init();
46381 }
46382
46383diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
46384index 7dcd2a2..d1d9cb6 100644
46385--- a/fs/proc/task_mmu.c
46386+++ b/fs/proc/task_mmu.c
46387@@ -52,8 +52,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46388 "VmExe:\t%8lu kB\n"
46389 "VmLib:\t%8lu kB\n"
46390 "VmPTE:\t%8lu kB\n"
46391- "VmSwap:\t%8lu kB\n",
46392- hiwater_vm << (PAGE_SHIFT-10),
46393+ "VmSwap:\t%8lu kB\n"
46394+
46395+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46396+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46397+#endif
46398+
46399+ ,hiwater_vm << (PAGE_SHIFT-10),
46400 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46401 mm->locked_vm << (PAGE_SHIFT-10),
46402 mm->pinned_vm << (PAGE_SHIFT-10),
46403@@ -62,7 +67,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46404 data << (PAGE_SHIFT-10),
46405 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46406 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
46407- swap << (PAGE_SHIFT-10));
46408+ swap << (PAGE_SHIFT-10)
46409+
46410+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46411+ , mm->context.user_cs_base, mm->context.user_cs_limit
46412+#endif
46413+
46414+ );
46415 }
46416
46417 unsigned long task_vsize(struct mm_struct *mm)
46418@@ -209,6 +220,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
46419 return ret;
46420 }
46421
46422+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46423+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46424+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46425+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46426+#endif
46427+
46428 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46429 {
46430 struct mm_struct *mm = vma->vm_mm;
46431@@ -227,13 +244,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46432 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46433 }
46434
46435- /* We don't show the stack guard page in /proc/maps */
46436+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46437+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
46438+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
46439+#else
46440 start = vma->vm_start;
46441- if (stack_guard_page_start(vma, start))
46442- start += PAGE_SIZE;
46443 end = vma->vm_end;
46444- if (stack_guard_page_end(vma, end))
46445- end -= PAGE_SIZE;
46446+#endif
46447
46448 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46449 start,
46450@@ -242,7 +259,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46451 flags & VM_WRITE ? 'w' : '-',
46452 flags & VM_EXEC ? 'x' : '-',
46453 flags & VM_MAYSHARE ? 's' : 'p',
46454+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46455+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46456+#else
46457 pgoff,
46458+#endif
46459 MAJOR(dev), MINOR(dev), ino, &len);
46460
46461 /*
46462@@ -251,7 +272,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46463 */
46464 if (file) {
46465 pad_len_spaces(m, len);
46466- seq_path(m, &file->f_path, "\n");
46467+ seq_path(m, &file->f_path, "\n\\");
46468 } else {
46469 const char *name = arch_vma_name(vma);
46470 if (!name) {
46471@@ -259,8 +280,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46472 if (vma->vm_start <= mm->brk &&
46473 vma->vm_end >= mm->start_brk) {
46474 name = "[heap]";
46475- } else if (vma->vm_start <= mm->start_stack &&
46476- vma->vm_end >= mm->start_stack) {
46477+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46478+ (vma->vm_start <= mm->start_stack &&
46479+ vma->vm_end >= mm->start_stack)) {
46480 name = "[stack]";
46481 }
46482 } else {
46483@@ -435,11 +457,16 @@ static int show_smap(struct seq_file *m, void *v)
46484 };
46485
46486 memset(&mss, 0, sizeof mss);
46487- mss.vma = vma;
46488- /* mmap_sem is held in m_start */
46489- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46490- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46491-
46492+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46493+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46494+#endif
46495+ mss.vma = vma;
46496+ /* mmap_sem is held in m_start */
46497+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46498+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46499+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46500+ }
46501+#endif
46502 show_map_vma(m, vma);
46503
46504 seq_printf(m,
46505@@ -457,7 +484,11 @@ static int show_smap(struct seq_file *m, void *v)
46506 "KernelPageSize: %8lu kB\n"
46507 "MMUPageSize: %8lu kB\n"
46508 "Locked: %8lu kB\n",
46509+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46510+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46511+#else
46512 (vma->vm_end - vma->vm_start) >> 10,
46513+#endif
46514 mss.resident >> 10,
46515 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46516 mss.shared_clean >> 10,
46517@@ -1036,7 +1067,7 @@ static int show_numa_map(struct seq_file *m, void *v)
46518
46519 if (file) {
46520 seq_printf(m, " file=");
46521- seq_path(m, &file->f_path, "\n\t= ");
46522+ seq_path(m, &file->f_path, "\n\t\\= ");
46523 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
46524 seq_printf(m, " heap");
46525 } else if (vma->vm_start <= mm->start_stack &&
46526diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
46527index 980de54..2a4db5f 100644
46528--- a/fs/proc/task_nommu.c
46529+++ b/fs/proc/task_nommu.c
46530@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
46531 else
46532 bytes += kobjsize(mm);
46533
46534- if (current->fs && current->fs->users > 1)
46535+ if (current->fs && atomic_read(&current->fs->users) > 1)
46536 sbytes += kobjsize(current->fs);
46537 else
46538 bytes += kobjsize(current->fs);
46539@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
46540
46541 if (file) {
46542 pad_len_spaces(m, len);
46543- seq_path(m, &file->f_path, "");
46544+ seq_path(m, &file->f_path, "\n\\");
46545 } else if (mm) {
46546 if (vma->vm_start <= mm->start_stack &&
46547 vma->vm_end >= mm->start_stack) {
46548diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
46549index d67908b..d13f6a6 100644
46550--- a/fs/quota/netlink.c
46551+++ b/fs/quota/netlink.c
46552@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
46553 void quota_send_warning(short type, unsigned int id, dev_t dev,
46554 const char warntype)
46555 {
46556- static atomic_t seq;
46557+ static atomic_unchecked_t seq;
46558 struct sk_buff *skb;
46559 void *msg_head;
46560 int ret;
46561@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
46562 "VFS: Not enough memory to send quota warning.\n");
46563 return;
46564 }
46565- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
46566+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
46567 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
46568 if (!msg_head) {
46569 printk(KERN_ERR
46570diff --git a/fs/readdir.c b/fs/readdir.c
46571index 356f715..c918d38 100644
46572--- a/fs/readdir.c
46573+++ b/fs/readdir.c
46574@@ -17,6 +17,7 @@
46575 #include <linux/security.h>
46576 #include <linux/syscalls.h>
46577 #include <linux/unistd.h>
46578+#include <linux/namei.h>
46579
46580 #include <asm/uaccess.h>
46581
46582@@ -67,6 +68,7 @@ struct old_linux_dirent {
46583
46584 struct readdir_callback {
46585 struct old_linux_dirent __user * dirent;
46586+ struct file * file;
46587 int result;
46588 };
46589
46590@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
46591 buf->result = -EOVERFLOW;
46592 return -EOVERFLOW;
46593 }
46594+
46595+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46596+ return 0;
46597+
46598 buf->result++;
46599 dirent = buf->dirent;
46600 if (!access_ok(VERIFY_WRITE, dirent,
46601@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
46602
46603 buf.result = 0;
46604 buf.dirent = dirent;
46605+ buf.file = file;
46606
46607 error = vfs_readdir(file, fillonedir, &buf);
46608 if (buf.result)
46609@@ -142,6 +149,7 @@ struct linux_dirent {
46610 struct getdents_callback {
46611 struct linux_dirent __user * current_dir;
46612 struct linux_dirent __user * previous;
46613+ struct file * file;
46614 int count;
46615 int error;
46616 };
46617@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
46618 buf->error = -EOVERFLOW;
46619 return -EOVERFLOW;
46620 }
46621+
46622+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46623+ return 0;
46624+
46625 dirent = buf->previous;
46626 if (dirent) {
46627 if (__put_user(offset, &dirent->d_off))
46628@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
46629 buf.previous = NULL;
46630 buf.count = count;
46631 buf.error = 0;
46632+ buf.file = file;
46633
46634 error = vfs_readdir(file, filldir, &buf);
46635 if (error >= 0)
46636@@ -229,6 +242,7 @@ out:
46637 struct getdents_callback64 {
46638 struct linux_dirent64 __user * current_dir;
46639 struct linux_dirent64 __user * previous;
46640+ struct file *file;
46641 int count;
46642 int error;
46643 };
46644@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
46645 buf->error = -EINVAL; /* only used if we fail.. */
46646 if (reclen > buf->count)
46647 return -EINVAL;
46648+
46649+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46650+ return 0;
46651+
46652 dirent = buf->previous;
46653 if (dirent) {
46654 if (__put_user(offset, &dirent->d_off))
46655@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46656
46657 buf.current_dir = dirent;
46658 buf.previous = NULL;
46659+ buf.file = file;
46660 buf.count = count;
46661 buf.error = 0;
46662
46663@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
46664 error = buf.error;
46665 lastdirent = buf.previous;
46666 if (lastdirent) {
46667- typeof(lastdirent->d_off) d_off = file->f_pos;
46668+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
46669 if (__put_user(d_off, &lastdirent->d_off))
46670 error = -EFAULT;
46671 else
46672diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
46673index 60c0804..d814f98 100644
46674--- a/fs/reiserfs/do_balan.c
46675+++ b/fs/reiserfs/do_balan.c
46676@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
46677 return;
46678 }
46679
46680- atomic_inc(&(fs_generation(tb->tb_sb)));
46681+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46682 do_balance_starts(tb);
46683
46684 /* balance leaf returns 0 except if combining L R and S into
46685diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
46686index 7a99811..a7c96c4 100644
46687--- a/fs/reiserfs/procfs.c
46688+++ b/fs/reiserfs/procfs.c
46689@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
46690 "SMALL_TAILS " : "NO_TAILS ",
46691 replay_only(sb) ? "REPLAY_ONLY " : "",
46692 convert_reiserfs(sb) ? "CONV " : "",
46693- atomic_read(&r->s_generation_counter),
46694+ atomic_read_unchecked(&r->s_generation_counter),
46695 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46696 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46697 SF(s_good_search_by_key_reada), SF(s_bmaps),
46698diff --git a/fs/select.c b/fs/select.c
46699index d33418f..2a5345e 100644
46700--- a/fs/select.c
46701+++ b/fs/select.c
46702@@ -20,6 +20,7 @@
46703 #include <linux/module.h>
46704 #include <linux/slab.h>
46705 #include <linux/poll.h>
46706+#include <linux/security.h>
46707 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46708 #include <linux/file.h>
46709 #include <linux/fdtable.h>
46710@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
46711 struct poll_list *walk = head;
46712 unsigned long todo = nfds;
46713
46714+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46715 if (nfds > rlimit(RLIMIT_NOFILE))
46716 return -EINVAL;
46717
46718diff --git a/fs/seq_file.c b/fs/seq_file.c
46719index dba43c3..a99fb63 100644
46720--- a/fs/seq_file.c
46721+++ b/fs/seq_file.c
46722@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46723 return 0;
46724 }
46725 if (!m->buf) {
46726- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46727+ m->size = PAGE_SIZE;
46728+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46729 if (!m->buf)
46730 return -ENOMEM;
46731 }
46732@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
46733 Eoverflow:
46734 m->op->stop(m, p);
46735 kfree(m->buf);
46736- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46737+ m->size <<= 1;
46738+ m->buf = kmalloc(m->size, GFP_KERNEL);
46739 return !m->buf ? -ENOMEM : -EAGAIN;
46740 }
46741
46742@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46743 m->version = file->f_version;
46744 /* grab buffer if we didn't have one */
46745 if (!m->buf) {
46746- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46747+ m->size = PAGE_SIZE;
46748+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46749 if (!m->buf)
46750 goto Enomem;
46751 }
46752@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
46753 goto Fill;
46754 m->op->stop(m, p);
46755 kfree(m->buf);
46756- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46757+ m->size <<= 1;
46758+ m->buf = kmalloc(m->size, GFP_KERNEL);
46759 if (!m->buf)
46760 goto Enomem;
46761 m->count = 0;
46762@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
46763 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
46764 void *data)
46765 {
46766- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
46767+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
46768 int res = -ENOMEM;
46769
46770 if (op) {
46771diff --git a/fs/splice.c b/fs/splice.c
46772index fa2defa..8601650 100644
46773--- a/fs/splice.c
46774+++ b/fs/splice.c
46775@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46776 pipe_lock(pipe);
46777
46778 for (;;) {
46779- if (!pipe->readers) {
46780+ if (!atomic_read(&pipe->readers)) {
46781 send_sig(SIGPIPE, current, 0);
46782 if (!ret)
46783 ret = -EPIPE;
46784@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
46785 do_wakeup = 0;
46786 }
46787
46788- pipe->waiting_writers++;
46789+ atomic_inc(&pipe->waiting_writers);
46790 pipe_wait(pipe);
46791- pipe->waiting_writers--;
46792+ atomic_dec(&pipe->waiting_writers);
46793 }
46794
46795 pipe_unlock(pipe);
46796@@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
46797 old_fs = get_fs();
46798 set_fs(get_ds());
46799 /* The cast to a user pointer is valid due to the set_fs() */
46800- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46801+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
46802 set_fs(old_fs);
46803
46804 return res;
46805@@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
46806 old_fs = get_fs();
46807 set_fs(get_ds());
46808 /* The cast to a user pointer is valid due to the set_fs() */
46809- res = vfs_write(file, (const char __user *)buf, count, &pos);
46810+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
46811 set_fs(old_fs);
46812
46813 return res;
46814@@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
46815 goto err;
46816
46817 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46818- vec[i].iov_base = (void __user *) page_address(page);
46819+ vec[i].iov_base = (void __force_user *) page_address(page);
46820 vec[i].iov_len = this_len;
46821 spd.pages[i] = page;
46822 spd.nr_pages++;
46823@@ -846,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46824 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46825 {
46826 while (!pipe->nrbufs) {
46827- if (!pipe->writers)
46828+ if (!atomic_read(&pipe->writers))
46829 return 0;
46830
46831- if (!pipe->waiting_writers && sd->num_spliced)
46832+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46833 return 0;
46834
46835 if (sd->flags & SPLICE_F_NONBLOCK)
46836@@ -1182,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
46837 * out of the pipe right after the splice_to_pipe(). So set
46838 * PIPE_READERS appropriately.
46839 */
46840- pipe->readers = 1;
46841+ atomic_set(&pipe->readers, 1);
46842
46843 current->splice_pipe = pipe;
46844 }
46845@@ -1734,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46846 ret = -ERESTARTSYS;
46847 break;
46848 }
46849- if (!pipe->writers)
46850+ if (!atomic_read(&pipe->writers))
46851 break;
46852- if (!pipe->waiting_writers) {
46853+ if (!atomic_read(&pipe->waiting_writers)) {
46854 if (flags & SPLICE_F_NONBLOCK) {
46855 ret = -EAGAIN;
46856 break;
46857@@ -1768,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46858 pipe_lock(pipe);
46859
46860 while (pipe->nrbufs >= pipe->buffers) {
46861- if (!pipe->readers) {
46862+ if (!atomic_read(&pipe->readers)) {
46863 send_sig(SIGPIPE, current, 0);
46864 ret = -EPIPE;
46865 break;
46866@@ -1781,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
46867 ret = -ERESTARTSYS;
46868 break;
46869 }
46870- pipe->waiting_writers++;
46871+ atomic_inc(&pipe->waiting_writers);
46872 pipe_wait(pipe);
46873- pipe->waiting_writers--;
46874+ atomic_dec(&pipe->waiting_writers);
46875 }
46876
46877 pipe_unlock(pipe);
46878@@ -1819,14 +1819,14 @@ retry:
46879 pipe_double_lock(ipipe, opipe);
46880
46881 do {
46882- if (!opipe->readers) {
46883+ if (!atomic_read(&opipe->readers)) {
46884 send_sig(SIGPIPE, current, 0);
46885 if (!ret)
46886 ret = -EPIPE;
46887 break;
46888 }
46889
46890- if (!ipipe->nrbufs && !ipipe->writers)
46891+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46892 break;
46893
46894 /*
46895@@ -1923,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46896 pipe_double_lock(ipipe, opipe);
46897
46898 do {
46899- if (!opipe->readers) {
46900+ if (!atomic_read(&opipe->readers)) {
46901 send_sig(SIGPIPE, current, 0);
46902 if (!ret)
46903 ret = -EPIPE;
46904@@ -1968,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
46905 * return EAGAIN if we have the potential of some data in the
46906 * future, otherwise just return 0
46907 */
46908- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46909+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46910 ret = -EAGAIN;
46911
46912 pipe_unlock(ipipe);
46913diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
46914index 7fdf6a7..e6cd8ad 100644
46915--- a/fs/sysfs/dir.c
46916+++ b/fs/sysfs/dir.c
46917@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
46918 struct sysfs_dirent *sd;
46919 int rc;
46920
46921+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
46922+ const char *parent_name = parent_sd->s_name;
46923+
46924+ mode = S_IFDIR | S_IRWXU;
46925+
46926+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
46927+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
46928+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
46929+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
46930+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
46931+#endif
46932+
46933 /* allocate */
46934 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
46935 if (!sd)
46936diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
46937index d4e6080b..0e58b99 100644
46938--- a/fs/sysfs/file.c
46939+++ b/fs/sysfs/file.c
46940@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
46941
46942 struct sysfs_open_dirent {
46943 atomic_t refcnt;
46944- atomic_t event;
46945+ atomic_unchecked_t event;
46946 wait_queue_head_t poll;
46947 struct list_head buffers; /* goes through sysfs_buffer.list */
46948 };
46949@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
46950 if (!sysfs_get_active(attr_sd))
46951 return -ENODEV;
46952
46953- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46954+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46955 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46956
46957 sysfs_put_active(attr_sd);
46958@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
46959 return -ENOMEM;
46960
46961 atomic_set(&new_od->refcnt, 0);
46962- atomic_set(&new_od->event, 1);
46963+ atomic_set_unchecked(&new_od->event, 1);
46964 init_waitqueue_head(&new_od->poll);
46965 INIT_LIST_HEAD(&new_od->buffers);
46966 goto retry;
46967@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
46968
46969 sysfs_put_active(attr_sd);
46970
46971- if (buffer->event != atomic_read(&od->event))
46972+ if (buffer->event != atomic_read_unchecked(&od->event))
46973 goto trigger;
46974
46975 return DEFAULT_POLLMASK;
46976@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
46977
46978 od = sd->s_attr.open;
46979 if (od) {
46980- atomic_inc(&od->event);
46981+ atomic_inc_unchecked(&od->event);
46982 wake_up_interruptible(&od->poll);
46983 }
46984
46985diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
46986index a7ac78f..02158e1 100644
46987--- a/fs/sysfs/symlink.c
46988+++ b/fs/sysfs/symlink.c
46989@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46990
46991 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
46992 {
46993- char *page = nd_get_link(nd);
46994+ const char *page = nd_get_link(nd);
46995 if (!IS_ERR(page))
46996 free_page((unsigned long)page);
46997 }
46998diff --git a/fs/udf/misc.c b/fs/udf/misc.c
46999index c175b4d..8f36a16 100644
47000--- a/fs/udf/misc.c
47001+++ b/fs/udf/misc.c
47002@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
47003
47004 u8 udf_tag_checksum(const struct tag *t)
47005 {
47006- u8 *data = (u8 *)t;
47007+ const u8 *data = (const u8 *)t;
47008 u8 checksum = 0;
47009 int i;
47010 for (i = 0; i < sizeof(struct tag); ++i)
47011diff --git a/fs/utimes.c b/fs/utimes.c
47012index ba653f3..06ea4b1 100644
47013--- a/fs/utimes.c
47014+++ b/fs/utimes.c
47015@@ -1,6 +1,7 @@
47016 #include <linux/compiler.h>
47017 #include <linux/file.h>
47018 #include <linux/fs.h>
47019+#include <linux/security.h>
47020 #include <linux/linkage.h>
47021 #include <linux/mount.h>
47022 #include <linux/namei.h>
47023@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
47024 goto mnt_drop_write_and_out;
47025 }
47026 }
47027+
47028+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47029+ error = -EACCES;
47030+ goto mnt_drop_write_and_out;
47031+ }
47032+
47033 mutex_lock(&inode->i_mutex);
47034 error = notify_change(path->dentry, &newattrs);
47035 mutex_unlock(&inode->i_mutex);
47036diff --git a/fs/xattr.c b/fs/xattr.c
47037index 67583de..c5aad14 100644
47038--- a/fs/xattr.c
47039+++ b/fs/xattr.c
47040@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47041 * Extended attribute SET operations
47042 */
47043 static long
47044-setxattr(struct dentry *d, const char __user *name, const void __user *value,
47045+setxattr(struct path *path, const char __user *name, const void __user *value,
47046 size_t size, int flags)
47047 {
47048 int error;
47049@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
47050 return PTR_ERR(kvalue);
47051 }
47052
47053- error = vfs_setxattr(d, kname, kvalue, size, flags);
47054+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47055+ error = -EACCES;
47056+ goto out;
47057+ }
47058+
47059+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47060+out:
47061 kfree(kvalue);
47062 return error;
47063 }
47064@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
47065 return error;
47066 error = mnt_want_write(path.mnt);
47067 if (!error) {
47068- error = setxattr(path.dentry, name, value, size, flags);
47069+ error = setxattr(&path, name, value, size, flags);
47070 mnt_drop_write(path.mnt);
47071 }
47072 path_put(&path);
47073@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
47074 return error;
47075 error = mnt_want_write(path.mnt);
47076 if (!error) {
47077- error = setxattr(path.dentry, name, value, size, flags);
47078+ error = setxattr(&path, name, value, size, flags);
47079 mnt_drop_write(path.mnt);
47080 }
47081 path_put(&path);
47082@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
47083 const void __user *,value, size_t, size, int, flags)
47084 {
47085 struct file *f;
47086- struct dentry *dentry;
47087 int error = -EBADF;
47088
47089 f = fget(fd);
47090 if (!f)
47091 return error;
47092- dentry = f->f_path.dentry;
47093- audit_inode(NULL, dentry);
47094+ audit_inode(NULL, f->f_path.dentry);
47095 error = mnt_want_write_file(f);
47096 if (!error) {
47097- error = setxattr(dentry, name, value, size, flags);
47098+ error = setxattr(&f->f_path, name, value, size, flags);
47099 mnt_drop_write(f->f_path.mnt);
47100 }
47101 fput(f);
47102diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
47103index 8d5a506..7f62712 100644
47104--- a/fs/xattr_acl.c
47105+++ b/fs/xattr_acl.c
47106@@ -17,8 +17,8 @@
47107 struct posix_acl *
47108 posix_acl_from_xattr(const void *value, size_t size)
47109 {
47110- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47111- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47112+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47113+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47114 int count;
47115 struct posix_acl *acl;
47116 struct posix_acl_entry *acl_e;
47117diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
47118index d0ab788..827999b 100644
47119--- a/fs/xfs/xfs_bmap.c
47120+++ b/fs/xfs/xfs_bmap.c
47121@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
47122 int nmap,
47123 int ret_nmap);
47124 #else
47125-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47126+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47127 #endif /* DEBUG */
47128
47129 STATIC int
47130diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
47131index 79d05e8..e3e5861 100644
47132--- a/fs/xfs/xfs_dir2_sf.c
47133+++ b/fs/xfs/xfs_dir2_sf.c
47134@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
47135 }
47136
47137 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
47138- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47139+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47140+ char name[sfep->namelen];
47141+ memcpy(name, sfep->name, sfep->namelen);
47142+ if (filldir(dirent, name, sfep->namelen,
47143+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47144+ *offset = off & 0x7fffffff;
47145+ return 0;
47146+ }
47147+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
47148 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47149 *offset = off & 0x7fffffff;
47150 return 0;
47151diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
47152index d99a905..9f88202 100644
47153--- a/fs/xfs/xfs_ioctl.c
47154+++ b/fs/xfs/xfs_ioctl.c
47155@@ -128,7 +128,7 @@ xfs_find_handle(
47156 }
47157
47158 error = -EFAULT;
47159- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47160+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47161 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47162 goto out_put;
47163
47164diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
47165index 23ce927..e274cc1 100644
47166--- a/fs/xfs/xfs_iops.c
47167+++ b/fs/xfs/xfs_iops.c
47168@@ -447,7 +447,7 @@ xfs_vn_put_link(
47169 struct nameidata *nd,
47170 void *p)
47171 {
47172- char *s = nd_get_link(nd);
47173+ const char *s = nd_get_link(nd);
47174
47175 if (!IS_ERR(s))
47176 kfree(s);
47177diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
47178index ce9268a..ee98d0b 100644
47179--- a/fs/xfs/xfs_vnodeops.c
47180+++ b/fs/xfs/xfs_vnodeops.c
47181@@ -131,7 +131,8 @@ xfs_readlink(
47182 __func__, (unsigned long long) ip->i_ino,
47183 (long long) pathlen);
47184 ASSERT(0);
47185- return XFS_ERROR(EFSCORRUPTED);
47186+ error = XFS_ERROR(EFSCORRUPTED);
47187+ goto out;
47188 }
47189
47190
47191diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
47192new file mode 100644
47193index 0000000..dfd3d34
47194--- /dev/null
47195+++ b/grsecurity/Kconfig
47196@@ -0,0 +1,1069 @@
47197+#
47198+# grecurity configuration
47199+#
47200+
47201+menu "Grsecurity"
47202+
47203+config GRKERNSEC
47204+ bool "Grsecurity"
47205+ select CRYPTO
47206+ select CRYPTO_SHA256
47207+ help
47208+ If you say Y here, you will be able to configure many features
47209+ that will enhance the security of your system. It is highly
47210+ recommended that you say Y here and read through the help
47211+ for each option so that you fully understand the features and
47212+ can evaluate their usefulness for your machine.
47213+
47214+choice
47215+ prompt "Security Level"
47216+ depends on GRKERNSEC
47217+ default GRKERNSEC_CUSTOM
47218+
47219+config GRKERNSEC_LOW
47220+ bool "Low"
47221+ select GRKERNSEC_LINK
47222+ select GRKERNSEC_FIFO
47223+ select GRKERNSEC_RANDNET
47224+ select GRKERNSEC_DMESG
47225+ select GRKERNSEC_CHROOT
47226+ select GRKERNSEC_CHROOT_CHDIR
47227+
47228+ help
47229+ If you choose this option, several of the grsecurity options will
47230+ be enabled that will give you greater protection against a number
47231+ of attacks, while assuring that none of your software will have any
47232+ conflicts with the additional security measures. If you run a lot
47233+ of unusual software, or you are having problems with the higher
47234+ security levels, you should say Y here. With this option, the
47235+ following features are enabled:
47236+
47237+ - Linking restrictions
47238+ - FIFO restrictions
47239+ - Restricted dmesg
47240+ - Enforced chdir("/") on chroot
47241+ - Runtime module disabling
47242+
47243+config GRKERNSEC_MEDIUM
47244+ bool "Medium"
47245+ select PAX
47246+ select PAX_EI_PAX
47247+ select PAX_PT_PAX_FLAGS
47248+ select PAX_HAVE_ACL_FLAGS
47249+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47250+ select GRKERNSEC_CHROOT
47251+ select GRKERNSEC_CHROOT_SYSCTL
47252+ select GRKERNSEC_LINK
47253+ select GRKERNSEC_FIFO
47254+ select GRKERNSEC_DMESG
47255+ select GRKERNSEC_RANDNET
47256+ select GRKERNSEC_FORKFAIL
47257+ select GRKERNSEC_TIME
47258+ select GRKERNSEC_SIGNAL
47259+ select GRKERNSEC_CHROOT
47260+ select GRKERNSEC_CHROOT_UNIX
47261+ select GRKERNSEC_CHROOT_MOUNT
47262+ select GRKERNSEC_CHROOT_PIVOT
47263+ select GRKERNSEC_CHROOT_DOUBLE
47264+ select GRKERNSEC_CHROOT_CHDIR
47265+ select GRKERNSEC_CHROOT_MKNOD
47266+ select GRKERNSEC_PROC
47267+ select GRKERNSEC_PROC_USERGROUP
47268+ select PAX_RANDUSTACK
47269+ select PAX_ASLR
47270+ select PAX_RANDMMAP
47271+ select PAX_REFCOUNT if (X86 || SPARC64)
47272+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
47273+
47274+ help
47275+ If you say Y here, several features in addition to those included
47276+ in the low additional security level will be enabled. These
47277+ features provide even more security to your system, though in rare
47278+ cases they may be incompatible with very old or poorly written
47279+ software. If you enable this option, make sure that your auth
47280+ service (identd) is running as gid 1001. With this option,
47281+ the following features (in addition to those provided in the
47282+ low additional security level) will be enabled:
47283+
47284+ - Failed fork logging
47285+ - Time change logging
47286+ - Signal logging
47287+ - Deny mounts in chroot
47288+ - Deny double chrooting
47289+ - Deny sysctl writes in chroot
47290+ - Deny mknod in chroot
47291+ - Deny access to abstract AF_UNIX sockets out of chroot
47292+ - Deny pivot_root in chroot
47293+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
47294+ - /proc restrictions with special GID set to 10 (usually wheel)
47295+ - Address Space Layout Randomization (ASLR)
47296+ - Prevent exploitation of most refcount overflows
47297+ - Bounds checking of copying between the kernel and userland
47298+
47299+config GRKERNSEC_HIGH
47300+ bool "High"
47301+ select GRKERNSEC_LINK
47302+ select GRKERNSEC_FIFO
47303+ select GRKERNSEC_DMESG
47304+ select GRKERNSEC_FORKFAIL
47305+ select GRKERNSEC_TIME
47306+ select GRKERNSEC_SIGNAL
47307+ select GRKERNSEC_CHROOT
47308+ select GRKERNSEC_CHROOT_SHMAT
47309+ select GRKERNSEC_CHROOT_UNIX
47310+ select GRKERNSEC_CHROOT_MOUNT
47311+ select GRKERNSEC_CHROOT_FCHDIR
47312+ select GRKERNSEC_CHROOT_PIVOT
47313+ select GRKERNSEC_CHROOT_DOUBLE
47314+ select GRKERNSEC_CHROOT_CHDIR
47315+ select GRKERNSEC_CHROOT_MKNOD
47316+ select GRKERNSEC_CHROOT_CAPS
47317+ select GRKERNSEC_CHROOT_SYSCTL
47318+ select GRKERNSEC_CHROOT_FINDTASK
47319+ select GRKERNSEC_SYSFS_RESTRICT
47320+ select GRKERNSEC_PROC
47321+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
47322+ select GRKERNSEC_HIDESYM
47323+ select GRKERNSEC_BRUTE
47324+ select GRKERNSEC_PROC_USERGROUP
47325+ select GRKERNSEC_KMEM
47326+ select GRKERNSEC_RESLOG
47327+ select GRKERNSEC_RANDNET
47328+ select GRKERNSEC_PROC_ADD
47329+ select GRKERNSEC_CHROOT_CHMOD
47330+ select GRKERNSEC_CHROOT_NICE
47331+ select GRKERNSEC_SETXID
47332+ select GRKERNSEC_AUDIT_MOUNT
47333+ select GRKERNSEC_MODHARDEN if (MODULES)
47334+ select GRKERNSEC_HARDEN_PTRACE
47335+ select GRKERNSEC_PTRACE_READEXEC
47336+ select GRKERNSEC_VM86 if (X86_32)
47337+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
47338+ select PAX
47339+ select PAX_RANDUSTACK
47340+ select PAX_ASLR
47341+ select PAX_RANDMMAP
47342+ select PAX_NOEXEC
47343+ select PAX_MPROTECT
47344+ select PAX_EI_PAX
47345+ select PAX_PT_PAX_FLAGS
47346+ select PAX_HAVE_ACL_FLAGS
47347+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
47348+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
47349+ select PAX_RANDKSTACK if (X86_TSC && X86)
47350+ select PAX_SEGMEXEC if (X86_32)
47351+ select PAX_PAGEEXEC
47352+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
47353+ select PAX_EMUTRAMP if (PARISC)
47354+ select PAX_EMUSIGRT if (PARISC)
47355+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
47356+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
47357+ select PAX_REFCOUNT if (X86 || SPARC64)
47358+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
47359+ help
47360+ If you say Y here, many of the features of grsecurity will be
47361+ enabled, which will protect you against many kinds of attacks
47362+ against your system. The heightened security comes at a cost
47363+ of an increased chance of incompatibilities with rare software
47364+ on your machine. Since this security level enables PaX, you should
47365+ view <http://pax.grsecurity.net> and read about the PaX
47366+ project. While you are there, download chpax and run it on
47367+ binaries that cause problems with PaX. Also remember that
47368+ since the /proc restrictions are enabled, you must run your
47369+ identd as gid 1001. This security level enables the following
47370+ features in addition to those listed in the low and medium
47371+ security levels:
47372+
47373+ - Additional /proc restrictions
47374+ - Chmod restrictions in chroot
47375+ - No signals, ptrace, or viewing of processes outside of chroot
47376+ - Capability restrictions in chroot
47377+ - Deny fchdir out of chroot
47378+ - Priority restrictions in chroot
47379+ - Segmentation-based implementation of PaX
47380+ - Mprotect restrictions
47381+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
47382+ - Kernel stack randomization
47383+ - Mount/unmount/remount logging
47384+ - Kernel symbol hiding
47385+ - Hardening of module auto-loading
47386+ - Ptrace restrictions
47387+ - Restricted vm86 mode
47388+ - Restricted sysfs/debugfs
47389+ - Active kernel exploit response
47390+
47391+config GRKERNSEC_CUSTOM
47392+ bool "Custom"
47393+ help
47394+ If you say Y here, you will be able to configure every grsecurity
47395+ option, which allows you to enable many more features that aren't
47396+ covered in the basic security levels. These additional features
47397+ include TPE, socket restrictions, and the sysctl system for
47398+ grsecurity. It is advised that you read through the help for
47399+ each option to determine its usefulness in your situation.
47400+
47401+endchoice
47402+
47403+menu "Address Space Protection"
47404+depends on GRKERNSEC
47405+
47406+config GRKERNSEC_KMEM
47407+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
47408+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
47409+ help
47410+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
47411+ be written to or read from to modify or leak the contents of the running
47412+ kernel. /dev/port will also not be allowed to be opened. If you have module
47413+ support disabled, enabling this will close up four ways that are
47414+ currently used to insert malicious code into the running kernel.
47415+ Even with all these features enabled, we still highly recommend that
47416+ you use the RBAC system, as it is still possible for an attacker to
47417+ modify the running kernel through privileged I/O granted by ioperm/iopl.
47418+ If you are not using XFree86, you may be able to stop this additional
47419+ case by enabling the 'Disable privileged I/O' option. Though nothing
47420+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
47421+ but only to video memory, which is the only writing we allow in this
47422+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
47423+ not be allowed to mprotect them with PROT_WRITE later.
47424+ It is highly recommended that you say Y here if you meet all the
47425+ conditions above.
47426+
47427+config GRKERNSEC_VM86
47428+ bool "Restrict VM86 mode"
47429+ depends on X86_32
47430+
47431+ help
47432+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
47433+ make use of a special execution mode on 32bit x86 processors called
47434+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
47435+ video cards and will still work with this option enabled. The purpose
47436+ of the option is to prevent exploitation of emulation errors in
47437+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
47438+ Nearly all users should be able to enable this option.
47439+
47440+config GRKERNSEC_IO
47441+ bool "Disable privileged I/O"
47442+ depends on X86
47443+ select RTC_CLASS
47444+ select RTC_INTF_DEV
47445+ select RTC_DRV_CMOS
47446+
47447+ help
47448+ If you say Y here, all ioperm and iopl calls will return an error.
47449+ Ioperm and iopl can be used to modify the running kernel.
47450+ Unfortunately, some programs need this access to operate properly,
47451+ the most notable of which are XFree86 and hwclock. hwclock can be
47452+ remedied by having RTC support in the kernel, so real-time
47453+ clock support is enabled if this option is enabled, to ensure
47454+ that hwclock operates correctly. XFree86 still will not
47455+ operate correctly with this option enabled, so DO NOT CHOOSE Y
47456+ IF YOU USE XFree86. If you use XFree86 and you still want to
47457+ protect your kernel against modification, use the RBAC system.
47458+
47459+config GRKERNSEC_PROC_MEMMAP
47460+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
47461+ default y if (PAX_NOEXEC || PAX_ASLR)
47462+ depends on PAX_NOEXEC || PAX_ASLR
47463+ help
47464+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
47465+ give no information about the addresses of its mappings if
47466+ PaX features that rely on random addresses are enabled on the task.
47467+ If you use PaX it is greatly recommended that you say Y here as it
47468+ closes up a hole that makes the full ASLR useless for suid
47469+ binaries.
47470+
47471+config GRKERNSEC_BRUTE
47472+ bool "Deter exploit bruteforcing"
47473+ help
47474+ If you say Y here, attempts to bruteforce exploits against forking
47475+ daemons such as apache or sshd, as well as against suid/sgid binaries
47476+ will be deterred. When a child of a forking daemon is killed by PaX
47477+ or crashes due to an illegal instruction or other suspicious signal,
47478+ the parent process will be delayed 30 seconds upon every subsequent
47479+ fork until the administrator is able to assess the situation and
47480+ restart the daemon.
47481+ In the suid/sgid case, the attempt is logged, the user has all their
47482+ processes terminated, and they are prevented from executing any further
47483+ processes for 15 minutes.
47484+ It is recommended that you also enable signal logging in the auditing
47485+ section so that logs are generated when a process triggers a suspicious
47486+ signal.
47487+ If the sysctl option is enabled, a sysctl option with name
47488+ "deter_bruteforce" is created.
47489+
47490+
47491+config GRKERNSEC_MODHARDEN
47492+ bool "Harden module auto-loading"
47493+ depends on MODULES
47494+ help
47495+ If you say Y here, module auto-loading in response to use of some
47496+ feature implemented by an unloaded module will be restricted to
47497+ root users. Enabling this option helps defend against attacks
47498+ by unprivileged users who abuse the auto-loading behavior to
47499+ cause a vulnerable module to load that is then exploited.
47500+
47501+ If this option prevents a legitimate use of auto-loading for a
47502+ non-root user, the administrator can execute modprobe manually
47503+ with the exact name of the module mentioned in the alert log.
47504+ Alternatively, the administrator can add the module to the list
47505+ of modules loaded at boot by modifying init scripts.
47506+
47507+ Modification of init scripts will most likely be needed on
47508+ Ubuntu servers with encrypted home directory support enabled,
47509+ as the first non-root user logging in will cause the ecb(aes),
47510+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
47511+
47512+config GRKERNSEC_HIDESYM
47513+ bool "Hide kernel symbols"
47514+ help
47515+ If you say Y here, getting information on loaded modules, and
47516+ displaying all kernel symbols through a syscall will be restricted
47517+ to users with CAP_SYS_MODULE. For software compatibility reasons,
47518+ /proc/kallsyms will be restricted to the root user. The RBAC
47519+ system can hide that entry even from root.
47520+
47521+ This option also prevents leaking of kernel addresses through
47522+ several /proc entries.
47523+
47524+ Note that this option is only effective provided the following
47525+ conditions are met:
47526+ 1) The kernel using grsecurity is not precompiled by some distribution
47527+ 2) You have also enabled GRKERNSEC_DMESG
47528+ 3) You are using the RBAC system and hiding other files such as your
47529+ kernel image and System.map. Alternatively, enabling this option
47530+ causes the permissions on /boot, /lib/modules, and the kernel
47531+ source directory to change at compile time to prevent
47532+ reading by non-root users.
47533+ If the above conditions are met, this option will aid in providing a
47534+ useful protection against local kernel exploitation of overflows
47535+ and arbitrary read/write vulnerabilities.
47536+
47537+config GRKERNSEC_KERN_LOCKOUT
47538+ bool "Active kernel exploit response"
47539+ depends on X86 || ARM || PPC || SPARC
47540+ help
47541+ If you say Y here, when a PaX alert is triggered due to suspicious
47542+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
47543+ or an OOPs occurs due to bad memory accesses, instead of just
47544+ terminating the offending process (and potentially allowing
47545+ a subsequent exploit from the same user), we will take one of two
47546+ actions:
47547+ If the user was root, we will panic the system
47548+ If the user was non-root, we will log the attempt, terminate
47549+ all processes owned by the user, then prevent them from creating
47550+ any new processes until the system is restarted
47551+ This deters repeated kernel exploitation/bruteforcing attempts
47552+ and is useful for later forensics.
47553+
47554+endmenu
47555+menu "Role Based Access Control Options"
47556+depends on GRKERNSEC
47557+
47558+config GRKERNSEC_RBAC_DEBUG
47559+ bool
47560+
47561+config GRKERNSEC_NO_RBAC
47562+ bool "Disable RBAC system"
47563+ help
47564+ If you say Y here, the /dev/grsec device will be removed from the kernel,
47565+ preventing the RBAC system from being enabled. You should only say Y
47566+ here if you have no intention of using the RBAC system, so as to prevent
47567+ an attacker with root access from misusing the RBAC system to hide files
47568+ and processes when loadable module support and /dev/[k]mem have been
47569+ locked down.
47570+
47571+config GRKERNSEC_ACL_HIDEKERN
47572+ bool "Hide kernel processes"
47573+ help
47574+ If you say Y here, all kernel threads will be hidden to all
47575+ processes but those whose subject has the "view hidden processes"
47576+ flag.
47577+
47578+config GRKERNSEC_ACL_MAXTRIES
47579+ int "Maximum tries before password lockout"
47580+ default 3
47581+ help
47582+ This option enforces the maximum number of times a user can attempt
47583+ to authorize themselves with the grsecurity RBAC system before being
47584+ denied the ability to attempt authorization again for a specified time.
47585+ The lower the number, the harder it will be to brute-force a password.
47586+
47587+config GRKERNSEC_ACL_TIMEOUT
47588+ int "Time to wait after max password tries, in seconds"
47589+ default 30
47590+ help
47591+ This option specifies the time the user must wait after attempting to
47592+ authorize to the RBAC system with the maximum number of invalid
47593+ passwords. The higher the number, the harder it will be to brute-force
47594+ a password.
47595+
47596+endmenu
47597+menu "Filesystem Protections"
47598+depends on GRKERNSEC
47599+
47600+config GRKERNSEC_PROC
47601+ bool "Proc restrictions"
47602+ help
47603+ If you say Y here, the permissions of the /proc filesystem
47604+ will be altered to enhance system security and privacy. You MUST
47605+ choose either a user only restriction or a user and group restriction.
47606+ Depending upon the option you choose, you can either restrict users to
47607+ see only the processes they themselves run, or choose a group that can
47608+ view all processes and files normally restricted to root if you choose
47609+ the "restrict to user only" option. NOTE: If you're running identd as
47610+ a non-root user, you will have to run it as the group you specify here.
47611+
47612+config GRKERNSEC_PROC_USER
47613+ bool "Restrict /proc to user only"
47614+ depends on GRKERNSEC_PROC
47615+ help
47616+ If you say Y here, non-root users will only be able to view their own
47617+ processes, and will be restricted from viewing network-related information,
47618+ and viewing kernel symbol and module information.
47619+
47620+config GRKERNSEC_PROC_USERGROUP
47621+ bool "Allow special group"
47622+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
47623+ help
47624+ If you say Y here, you will be able to select a group that will be
47625+ able to view all processes and network-related information. If you've
47626+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
47627+ remain hidden. This option is useful if you want to run identd as
47628+ a non-root user.
47629+
47630+config GRKERNSEC_PROC_GID
47631+ int "GID for special group"
47632+ depends on GRKERNSEC_PROC_USERGROUP
47633+ default 1001
47634+
47635+config GRKERNSEC_PROC_ADD
47636+ bool "Additional restrictions"
47637+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
47638+ help
47639+ If you say Y here, additional restrictions will be placed on
47640+ /proc that keep normal users from viewing device information and
47641+ slabinfo information that could be useful for exploits.
47642+
47643+config GRKERNSEC_LINK
47644+ bool "Linking restrictions"
47645+ help
47646+ If you say Y here, /tmp race exploits will be prevented, since users
47647+ will no longer be able to follow symlinks owned by other users in
47648+ world-writable +t directories (e.g. /tmp), unless the owner of the
47649+ symlink is the owner of the directory. Users will also not be
47650+ able to hardlink to files they do not own. If the sysctl option is
47651+ enabled, a sysctl option with name "linking_restrictions" is created.
47652+
47653+config GRKERNSEC_FIFO
47654+ bool "FIFO restrictions"
47655+ help
47656+ If you say Y here, users will not be able to write to FIFOs they don't
47657+ own in world-writable +t directories (e.g. /tmp), unless the owner of
47658+ the FIFO is the same owner of the directory it's held in. If the sysctl
47659+ option is enabled, a sysctl option with name "fifo_restrictions" is
47660+ created.
47661+
47662+config GRKERNSEC_SYSFS_RESTRICT
47663+ bool "Sysfs/debugfs restriction"
47664+ depends on SYSFS
47665+ help
47666+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
47667+ any filesystem normally mounted under it (e.g. debugfs) will be
47668+ mostly accessible only by root. These filesystems generally provide access
47669+ to hardware and debug information that isn't appropriate for unprivileged
47670+ users of the system. Sysfs and debugfs have also become a large source
47671+ of new vulnerabilities, ranging from infoleaks to local compromise.
47672+ There has been very little oversight with an eye toward security involved
47673+ in adding new exporters of information to these filesystems, so their
47674+ use is discouraged.
47675+ For reasons of compatibility, a few directories have been whitelisted
47676+ for access by non-root users:
47677+ /sys/fs/selinux
47678+ /sys/fs/fuse
47679+ /sys/devices/system/cpu
47680+
47681+config GRKERNSEC_ROFS
47682+ bool "Runtime read-only mount protection"
47683+ help
47684+ If you say Y here, a sysctl option with name "romount_protect" will
47685+ be created. By setting this option to 1 at runtime, filesystems
47686+ will be protected in the following ways:
47687+ * No new writable mounts will be allowed
47688+ * Existing read-only mounts won't be able to be remounted read/write
47689+ * Write operations will be denied on all block devices
47690+ This option acts independently of grsec_lock: once it is set to 1,
47691+ it cannot be turned off. Therefore, please be mindful of the resulting
47692+ behavior if this option is enabled in an init script on a read-only
47693+ filesystem. This feature is mainly intended for secure embedded systems.
47694+
47695+config GRKERNSEC_CHROOT
47696+ bool "Chroot jail restrictions"
47697+ help
47698+ If you say Y here, you will be able to choose several options that will
47699+ make breaking out of a chrooted jail much more difficult. If you
47700+ encounter no software incompatibilities with the following options, it
47701+ is recommended that you enable each one.
47702+
47703+config GRKERNSEC_CHROOT_MOUNT
47704+ bool "Deny mounts"
47705+ depends on GRKERNSEC_CHROOT
47706+ help
47707+ If you say Y here, processes inside a chroot will not be able to
47708+ mount or remount filesystems. If the sysctl option is enabled, a
47709+ sysctl option with name "chroot_deny_mount" is created.
47710+
47711+config GRKERNSEC_CHROOT_DOUBLE
47712+ bool "Deny double-chroots"
47713+ depends on GRKERNSEC_CHROOT
47714+ help
47715+ If you say Y here, processes inside a chroot will not be able to chroot
47716+ again outside the chroot. This is a widely used method of breaking
47717+ out of a chroot jail and should not be allowed. If the sysctl
47718+ option is enabled, a sysctl option with name
47719+ "chroot_deny_chroot" is created.
47720+
47721+config GRKERNSEC_CHROOT_PIVOT
47722+ bool "Deny pivot_root in chroot"
47723+ depends on GRKERNSEC_CHROOT
47724+ help
47725+ If you say Y here, processes inside a chroot will not be able to use
47726+ a function called pivot_root() that was introduced in Linux 2.3.41. It
47727+ works similar to chroot in that it changes the root filesystem. This
47728+ function could be misused in a chrooted process to attempt to break out
47729+ of the chroot, and therefore should not be allowed. If the sysctl
47730+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
47731+ created.
47732+
47733+config GRKERNSEC_CHROOT_CHDIR
47734+ bool "Enforce chdir(\"/\") on all chroots"
47735+ depends on GRKERNSEC_CHROOT
47736+ help
47737+ If you say Y here, the current working directory of all newly-chrooted
47738+ applications will be set to the root directory of the chroot.
47739+ The man page on chroot(2) states:
47740+ Note that this call does not change the current working
47741+ directory, so that `.' can be outside the tree rooted at
47742+ `/'. In particular, the super-user can escape from a
47743+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
47744+
47745+ It is recommended that you say Y here, since it's not known to break
47746+ any software. If the sysctl option is enabled, a sysctl option with
47747+ name "chroot_enforce_chdir" is created.
47748+
47749+config GRKERNSEC_CHROOT_CHMOD
47750+ bool "Deny (f)chmod +s"
47751+ depends on GRKERNSEC_CHROOT
47752+ help
47753+ If you say Y here, processes inside a chroot will not be able to chmod
47754+ or fchmod files to make them have suid or sgid bits. This protects
47755+ against another published method of breaking a chroot. If the sysctl
47756+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
47757+ created.
47758+
47759+config GRKERNSEC_CHROOT_FCHDIR
47760+ bool "Deny fchdir out of chroot"
47761+ depends on GRKERNSEC_CHROOT
47762+ help
47763+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
47764+ to a file descriptor of the chrooting process that points to a directory
47765+ outside the filesystem will be stopped. If the sysctl option
47766+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
47767+
47768+config GRKERNSEC_CHROOT_MKNOD
47769+ bool "Deny mknod"
47770+ depends on GRKERNSEC_CHROOT
47771+ help
47772+ If you say Y here, processes inside a chroot will not be allowed to
47773+ mknod. The problem with using mknod inside a chroot is that it
47774+ would allow an attacker to create a device entry that is the same
47775+ as one on the physical root of your system, which could range from
47776+ anything from the console device to a device for your harddrive (which
47777+ they could then use to wipe the drive or steal data). It is recommended
47778+ that you say Y here, unless you run into software incompatibilities.
47779+ If the sysctl option is enabled, a sysctl option with name
47780+ "chroot_deny_mknod" is created.
47781+
47782+config GRKERNSEC_CHROOT_SHMAT
47783+ bool "Deny shmat() out of chroot"
47784+ depends on GRKERNSEC_CHROOT
47785+ help
47786+ If you say Y here, processes inside a chroot will not be able to attach
47787+ to shared memory segments that were created outside of the chroot jail.
47788+ It is recommended that you say Y here. If the sysctl option is enabled,
47789+ a sysctl option with name "chroot_deny_shmat" is created.
47790+
47791+config GRKERNSEC_CHROOT_UNIX
47792+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
47793+ depends on GRKERNSEC_CHROOT
47794+ help
47795+ If you say Y here, processes inside a chroot will not be able to
47796+ connect to abstract (meaning not belonging to a filesystem) Unix
47797+ domain sockets that were bound outside of a chroot. It is recommended
47798+ that you say Y here. If the sysctl option is enabled, a sysctl option
47799+ with name "chroot_deny_unix" is created.
47800+
47801+config GRKERNSEC_CHROOT_FINDTASK
47802+ bool "Protect outside processes"
47803+ depends on GRKERNSEC_CHROOT
47804+ help
47805+ If you say Y here, processes inside a chroot will not be able to
47806+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
47807+ getsid, or view any process outside of the chroot. If the sysctl
47808+ option is enabled, a sysctl option with name "chroot_findtask" is
47809+ created.
47810+
47811+config GRKERNSEC_CHROOT_NICE
47812+ bool "Restrict priority changes"
47813+ depends on GRKERNSEC_CHROOT
47814+ help
47815+ If you say Y here, processes inside a chroot will not be able to raise
47816+ the priority of processes in the chroot, or alter the priority of
47817+ processes outside the chroot. This provides more security than simply
47818+ removing CAP_SYS_NICE from the process' capability set. If the
47819+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
47820+ is created.
47821+
47822+config GRKERNSEC_CHROOT_SYSCTL
47823+ bool "Deny sysctl writes"
47824+ depends on GRKERNSEC_CHROOT
47825+ help
47826+ If you say Y here, an attacker in a chroot will not be able to
47827+ write to sysctl entries, either by sysctl(2) or through a /proc
47828+ interface. It is strongly recommended that you say Y here. If the
47829+ sysctl option is enabled, a sysctl option with name
47830+ "chroot_deny_sysctl" is created.
47831+
47832+config GRKERNSEC_CHROOT_CAPS
47833+ bool "Capability restrictions"
47834+ depends on GRKERNSEC_CHROOT
47835+ help
47836+ If you say Y here, the capabilities on all processes within a
47837+ chroot jail will be lowered to stop module insertion, raw i/o,
47838+ system and net admin tasks, rebooting the system, modifying immutable
47839+ files, modifying IPC owned by another, and changing the system time.
47840+ This is left an option because it can break some apps. Disable this
47841+ if your chrooted apps are having problems performing those kinds of
47842+ tasks. If the sysctl option is enabled, a sysctl option with
47843+ name "chroot_caps" is created.
47844+
47845+endmenu
47846+menu "Kernel Auditing"
47847+depends on GRKERNSEC
47848+
47849+config GRKERNSEC_AUDIT_GROUP
47850+ bool "Single group for auditing"
47851+ help
47852+ If you say Y here, the exec, chdir, and (un)mount logging features
47853+ will only operate on a group you specify. This option is recommended
47854+ if you only want to watch certain users instead of having a large
47855+ amount of logs from the entire system. If the sysctl option is enabled,
47856+ a sysctl option with name "audit_group" is created.
47857+
47858+config GRKERNSEC_AUDIT_GID
47859+ int "GID for auditing"
47860+ depends on GRKERNSEC_AUDIT_GROUP
47861+ default 1007
47862+
47863+config GRKERNSEC_EXECLOG
47864+ bool "Exec logging"
47865+ help
47866+ If you say Y here, all execve() calls will be logged (since the
47867+ other exec*() calls are frontends to execve(), all execution
47868+ will be logged). Useful for shell-servers that like to keep track
47869+ of their users. If the sysctl option is enabled, a sysctl option with
47870+ name "exec_logging" is created.
47871+ WARNING: This option when enabled will produce a LOT of logs, especially
47872+ on an active system.
47873+
47874+config GRKERNSEC_RESLOG
47875+ bool "Resource logging"
47876+ help
47877+ If you say Y here, all attempts to overstep resource limits will
47878+ be logged with the resource name, the requested size, and the current
47879+ limit. It is highly recommended that you say Y here. If the sysctl
47880+ option is enabled, a sysctl option with name "resource_logging" is
47881+ created. If the RBAC system is enabled, the sysctl value is ignored.
47882+
47883+config GRKERNSEC_CHROOT_EXECLOG
47884+ bool "Log execs within chroot"
47885+ help
47886+ If you say Y here, all executions inside a chroot jail will be logged
47887+ to syslog. This can cause a large amount of logs if certain
47888+ applications (eg. djb's daemontools) are installed on the system, and
47889+ is therefore left as an option. If the sysctl option is enabled, a
47890+ sysctl option with name "chroot_execlog" is created.
47891+
47892+config GRKERNSEC_AUDIT_PTRACE
47893+ bool "Ptrace logging"
47894+ help
47895+ If you say Y here, all attempts to attach to a process via ptrace
47896+ will be logged. If the sysctl option is enabled, a sysctl option
47897+ with name "audit_ptrace" is created.
47898+
47899+config GRKERNSEC_AUDIT_CHDIR
47900+ bool "Chdir logging"
47901+ help
47902+ If you say Y here, all chdir() calls will be logged. If the sysctl
47903+ option is enabled, a sysctl option with name "audit_chdir" is created.
47904+
47905+config GRKERNSEC_AUDIT_MOUNT
47906+ bool "(Un)Mount logging"
47907+ help
47908+ If you say Y here, all mounts and unmounts will be logged. If the
47909+ sysctl option is enabled, a sysctl option with name "audit_mount" is
47910+ created.
47911+
47912+config GRKERNSEC_SIGNAL
47913+ bool "Signal logging"
47914+ help
47915+ If you say Y here, certain important signals will be logged, such as
47916+ SIGSEGV, which will as a result inform you of when an error in a program
47917+ occurred, which in some cases could mean a possible exploit attempt.
47918+ If the sysctl option is enabled, a sysctl option with name
47919+ "signal_logging" is created.
47920+
47921+config GRKERNSEC_FORKFAIL
47922+ bool "Fork failure logging"
47923+ help
47924+ If you say Y here, all failed fork() attempts will be logged.
47925+ This could suggest a fork bomb, or someone attempting to overstep
47926+ their process limit. If the sysctl option is enabled, a sysctl option
47927+ with name "forkfail_logging" is created.
47928+
47929+config GRKERNSEC_TIME
47930+ bool "Time change logging"
47931+ help
47932+ If you say Y here, any changes of the system clock will be logged.
47933+ If the sysctl option is enabled, a sysctl option with name
47934+ "timechange_logging" is created.
47935+
47936+config GRKERNSEC_PROC_IPADDR
47937+ bool "/proc/<pid>/ipaddr support"
47938+ help
47939+ If you say Y here, a new entry will be added to each /proc/<pid>
47940+ directory that contains the IP address of the person using the task.
47941+ The IP is carried across local TCP and AF_UNIX stream sockets.
47942+ This information can be useful for IDS/IPSes to perform remote response
47943+ to a local attack. The entry is readable by only the owner of the
47944+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
47945+ the RBAC system), and thus does not create privacy concerns.
47946+
47947+config GRKERNSEC_RWXMAP_LOG
47948+ bool 'Denied RWX mmap/mprotect logging'
47949+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
47950+ help
47951+ If you say Y here, calls to mmap() and mprotect() with explicit
47952+ usage of PROT_WRITE and PROT_EXEC together will be logged when
47953+ denied by the PAX_MPROTECT feature. If the sysctl option is
47954+ enabled, a sysctl option with name "rwxmap_logging" is created.
47955+
47956+config GRKERNSEC_AUDIT_TEXTREL
47957+ bool 'ELF text relocations logging (READ HELP)'
47958+ depends on PAX_MPROTECT
47959+ help
47960+ If you say Y here, text relocations will be logged with the filename
47961+ of the offending library or binary. The purpose of the feature is
47962+ to help Linux distribution developers get rid of libraries and
47963+ binaries that need text relocations which hinder the future progress
47964+ of PaX. Only Linux distribution developers should say Y here, and
47965+ never on a production machine, as this option creates an information
47966+ leak that could aid an attacker in defeating the randomization of
47967+ a single memory region. If the sysctl option is enabled, a sysctl
47968+ option with name "audit_textrel" is created.
47969+
47970+endmenu
47971+
47972+menu "Executable Protections"
47973+depends on GRKERNSEC
47974+
47975+config GRKERNSEC_DMESG
47976+ bool "Dmesg(8) restriction"
47977+ help
47978+ If you say Y here, non-root users will not be able to use dmesg(8)
47979+ to view up to the last 4kb of messages in the kernel's log buffer.
47980+ The kernel's log buffer often contains kernel addresses and other
47981+ identifying information useful to an attacker in fingerprinting a
47982+ system for a targeted exploit.
47983+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
47984+ created.
47985+
47986+config GRKERNSEC_HARDEN_PTRACE
47987+ bool "Deter ptrace-based process snooping"
47988+ help
47989+ If you say Y here, TTY sniffers and other malicious monitoring
47990+ programs implemented through ptrace will be defeated. If you
47991+ have been using the RBAC system, this option has already been
47992+ enabled for several years for all users, with the ability to make
47993+ fine-grained exceptions.
47994+
47995+ This option only affects the ability of non-root users to ptrace
47996+ processes that are not a descendent of the ptracing process.
47997+ This means that strace ./binary and gdb ./binary will still work,
47998+ but attaching to arbitrary processes will not. If the sysctl
47999+ option is enabled, a sysctl option with name "harden_ptrace" is
48000+ created.
48001+
48002+config GRKERNSEC_PTRACE_READEXEC
48003+ bool "Require read access to ptrace sensitive binaries"
48004+ help
48005+ If you say Y here, unprivileged users will not be able to ptrace unreadable
48006+ binaries. This option is useful in environments that
48007+ remove the read bits (e.g. file mode 4711) from suid binaries to
48008+ prevent infoleaking of their contents. This option adds
48009+ consistency to the use of that file mode, as the binary could normally
48010+ be read out when run without privileges while ptracing.
48011+
48012+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
48013+ is created.
48014+
48015+config GRKERNSEC_SETXID
48016+ bool "Enforce consistent multithreaded privileges"
48017+ help
48018+ If you say Y here, a change from a root uid to a non-root uid
48019+ in a multithreaded application will cause the resulting uids,
48020+ gids, supplementary groups, and capabilities in that thread
48021+ to be propagated to the other threads of the process. In most
48022+ cases this is unnecessary, as glibc will emulate this behavior
48023+ on behalf of the application. Other libcs do not act in the
48024+ same way, allowing the other threads of the process to continue
48025+ running with root privileges. If the sysctl option is enabled,
48026+ a sysctl option with name "consistent_setxid" is created.
48027+
48028+config GRKERNSEC_TPE
48029+ bool "Trusted Path Execution (TPE)"
48030+ help
48031+ If you say Y here, you will be able to choose a gid to add to the
48032+ supplementary groups of users you want to mark as "untrusted."
48033+ These users will not be able to execute any files that are not in
48034+ root-owned directories writable only by root. If the sysctl option
48035+ is enabled, a sysctl option with name "tpe" is created.
48036+
48037+config GRKERNSEC_TPE_ALL
48038+ bool "Partially restrict all non-root users"
48039+ depends on GRKERNSEC_TPE
48040+ help
48041+ If you say Y here, all non-root users will be covered under
48042+ a weaker TPE restriction. This is separate from, and in addition to,
48043+ the main TPE options that you have selected elsewhere. Thus, if a
48044+ "trusted" GID is chosen, this restriction applies to even that GID.
48045+ Under this restriction, all non-root users will only be allowed to
48046+ execute files in directories they own that are not group or
48047+ world-writable, or in directories owned by root and writable only by
48048+ root. If the sysctl option is enabled, a sysctl option with name
48049+ "tpe_restrict_all" is created.
48050+
48051+config GRKERNSEC_TPE_INVERT
48052+ bool "Invert GID option"
48053+ depends on GRKERNSEC_TPE
48054+ help
48055+ If you say Y here, the group you specify in the TPE configuration will
48056+ decide what group TPE restrictions will be *disabled* for. This
48057+ option is useful if you want TPE restrictions to be applied to most
48058+ users on the system. If the sysctl option is enabled, a sysctl option
48059+ with name "tpe_invert" is created. Unlike other sysctl options, this
48060+ entry will default to on for backward-compatibility.
48061+
48062+config GRKERNSEC_TPE_GID
48063+ int "GID for untrusted users"
48064+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
48065+ default 1005
48066+ help
48067+ Setting this GID determines what group TPE restrictions will be
48068+ *enabled* for. If the sysctl option is enabled, a sysctl option
48069+ with name "tpe_gid" is created.
48070+
48071+config GRKERNSEC_TPE_GID
48072+ int "GID for trusted users"
48073+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
48074+ default 1005
48075+ help
48076+ Setting this GID determines what group TPE restrictions will be
48077+ *disabled* for. If the sysctl option is enabled, a sysctl option
48078+ with name "tpe_gid" is created.
48079+
48080+endmenu
48081+menu "Network Protections"
48082+depends on GRKERNSEC
48083+
48084+config GRKERNSEC_RANDNET
48085+ bool "Larger entropy pools"
48086+ help
48087+ If you say Y here, the entropy pools used for many features of Linux
48088+ and grsecurity will be doubled in size. Since several grsecurity
48089+ features use additional randomness, it is recommended that you say Y
48090+ here. Saying Y here has a similar effect as modifying
48091+ /proc/sys/kernel/random/poolsize.
48092+
48093+config GRKERNSEC_BLACKHOLE
48094+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
48095+ depends on NET
48096+ help
48097+ If you say Y here, neither TCP resets nor ICMP
48098+ destination-unreachable packets will be sent in response to packets
48099+ sent to ports for which no associated listening process exists.
48100+ This feature supports both IPV4 and IPV6 and exempts the
48101+ loopback interface from blackholing. Enabling this feature
48102+ makes a host more resilient to DoS attacks and reduces network
48103+ visibility against scanners.
48104+
48105+ The blackhole feature as-implemented is equivalent to the FreeBSD
48106+ blackhole feature, as it prevents RST responses to all packets, not
48107+ just SYNs. Under most application behavior this causes no
48108+ problems, but applications (like haproxy) may not close certain
48109+ connections in a way that cleanly terminates them on the remote
48110+ end, leaving the remote host in LAST_ACK state. Because of this
48111+ side-effect and to prevent intentional LAST_ACK DoSes, this
48112+ feature also adds automatic mitigation against such attacks.
48113+ The mitigation drastically reduces the amount of time a socket
48114+ can spend in LAST_ACK state. If you're using haproxy and not
48115+ all servers it connects to have this option enabled, consider
48116+ disabling this feature on the haproxy host.
48117+
48118+ If the sysctl option is enabled, two sysctl options with names
48119+ "ip_blackhole" and "lastack_retries" will be created.
48120+ While "ip_blackhole" takes the standard zero/non-zero on/off
48121+ toggle, "lastack_retries" uses the same kinds of values as
48122+ "tcp_retries1" and "tcp_retries2". The default value of 4
48123+ prevents a socket from lasting more than 45 seconds in LAST_ACK
48124+ state.
48125+
48126+config GRKERNSEC_SOCKET
48127+ bool "Socket restrictions"
48128+ depends on NET
48129+ help
48130+ If you say Y here, you will be able to choose from several options.
48131+ If you assign a GID on your system and add it to the supplementary
48132+ groups of users you want to restrict socket access to, this patch
48133+ will perform up to three things, based on the option(s) you choose.
48134+
48135+config GRKERNSEC_SOCKET_ALL
48136+ bool "Deny any sockets to group"
48137+ depends on GRKERNSEC_SOCKET
48138+ help
48139+ If you say Y here, you will be able to choose a GID of whose users will
48140+ be unable to connect to other hosts from your machine or run server
48141+ applications from your machine. If the sysctl option is enabled, a
48142+ sysctl option with name "socket_all" is created.
48143+
48144+config GRKERNSEC_SOCKET_ALL_GID
48145+ int "GID to deny all sockets for"
48146+ depends on GRKERNSEC_SOCKET_ALL
48147+ default 1004
48148+ help
48149+ Here you can choose the GID to disable socket access for. Remember to
48150+ add the users you want socket access disabled for to the GID
48151+ specified here. If the sysctl option is enabled, a sysctl option
48152+ with name "socket_all_gid" is created.
48153+
48154+config GRKERNSEC_SOCKET_CLIENT
48155+ bool "Deny client sockets to group"
48156+ depends on GRKERNSEC_SOCKET
48157+ help
48158+ If you say Y here, you will be able to choose a GID of whose users will
48159+ be unable to connect to other hosts from your machine, but will be
48160+ able to run servers. If this option is enabled, all users in the group
48161+ you specify will have to use passive mode when initiating ftp transfers
48162+ from the shell on your machine. If the sysctl option is enabled, a
48163+ sysctl option with name "socket_client" is created.
48164+
48165+config GRKERNSEC_SOCKET_CLIENT_GID
48166+ int "GID to deny client sockets for"
48167+ depends on GRKERNSEC_SOCKET_CLIENT
48168+ default 1003
48169+ help
48170+ Here you can choose the GID to disable client socket access for.
48171+ Remember to add the users you want client socket access disabled for to
48172+ the GID specified here. If the sysctl option is enabled, a sysctl
48173+ option with name "socket_client_gid" is created.
48174+
48175+config GRKERNSEC_SOCKET_SERVER
48176+ bool "Deny server sockets to group"
48177+ depends on GRKERNSEC_SOCKET
48178+ help
48179+ If you say Y here, you will be able to choose a GID of whose users will
48180+ be unable to run server applications from your machine. If the sysctl
48181+ option is enabled, a sysctl option with name "socket_server" is created.
48182+
48183+config GRKERNSEC_SOCKET_SERVER_GID
48184+ int "GID to deny server sockets for"
48185+ depends on GRKERNSEC_SOCKET_SERVER
48186+ default 1002
48187+ help
48188+ Here you can choose the GID to disable server socket access for.
48189+ Remember to add the users you want server socket access disabled for to
48190+ the GID specified here. If the sysctl option is enabled, a sysctl
48191+ option with name "socket_server_gid" is created.
48192+
48193+endmenu
48194+menu "Sysctl support"
48195+depends on GRKERNSEC && SYSCTL
48196+
48197+config GRKERNSEC_SYSCTL
48198+ bool "Sysctl support"
48199+ help
48200+ If you say Y here, you will be able to change the options that
48201+ grsecurity runs with at bootup, without having to recompile your
48202+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
48203+ to enable (1) or disable (0) various features. All the sysctl entries
48204+ are mutable until the "grsec_lock" entry is set to a non-zero value.
48205+ All features enabled in the kernel configuration are disabled at boot
48206+ if you do not say Y to the "Turn on features by default" option.
48207+ All options should be set at startup, and the grsec_lock entry should
48208+ be set to a non-zero value after all the options are set.
48209+ *THIS IS EXTREMELY IMPORTANT*
48210+
48211+config GRKERNSEC_SYSCTL_DISTRO
48212+ bool "Extra sysctl support for distro makers (READ HELP)"
48213+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
48214+ help
48215+ If you say Y here, additional sysctl options will be created
48216+ for features that affect processes running as root. Therefore,
48217+ it is critical when using this option that the grsec_lock entry be
48218+ enabled after boot. Only distros with prebuilt kernel packages
48219+ with this option enabled that can ensure grsec_lock is enabled
48220+ after boot should use this option.
48221+ *Failure to set grsec_lock after boot makes all grsec features
48222+ this option covers useless*
48223+
48224+ Currently this option creates the following sysctl entries:
48225+ "Disable Privileged I/O": "disable_priv_io"
48226+
48227+config GRKERNSEC_SYSCTL_ON
48228+ bool "Turn on features by default"
48229+ depends on GRKERNSEC_SYSCTL
48230+ help
48231+ If you say Y here, instead of having all features enabled in the
48232+ kernel configuration disabled at boot time, the features will be
48233+ enabled at boot time. It is recommended you say Y here unless
48234+ there is some reason you would want all sysctl-tunable features to
48235+ be disabled by default. As mentioned elsewhere, it is important
48236+ to enable the grsec_lock entry once you have finished modifying
48237+ the sysctl entries.
48238+
48239+endmenu
48240+menu "Logging Options"
48241+depends on GRKERNSEC
48242+
48243+config GRKERNSEC_FLOODTIME
48244+ int "Seconds in between log messages (minimum)"
48245+ default 10
48246+ help
48247+ This option allows you to enforce the number of seconds between
48248+ grsecurity log messages. The default should be suitable for most
48249+ people, however, if you choose to change it, choose a value small enough
48250+ to allow informative logs to be produced, but large enough to
48251+ prevent flooding.
48252+
48253+config GRKERNSEC_FLOODBURST
48254+ int "Number of messages in a burst (maximum)"
48255+ default 6
48256+ help
48257+ This option allows you to choose the maximum number of messages allowed
48258+ within the flood time interval you chose in a separate option. The
48259+ default should be suitable for most people, however if you find that
48260+ many of your logs are being interpreted as flooding, you may want to
48261+ raise this value.
48262+
48263+endmenu
48264+
48265+endmenu
48266diff --git a/grsecurity/Makefile b/grsecurity/Makefile
48267new file mode 100644
48268index 0000000..be9ae3a
48269--- /dev/null
48270+++ b/grsecurity/Makefile
48271@@ -0,0 +1,36 @@
48272+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
48273+# during 2001-2009 it has been completely redesigned by Brad Spengler
48274+# into an RBAC system
48275+#
48276+# All code in this directory and various hooks inserted throughout the kernel
48277+# are copyright Brad Spengler - Open Source Security, Inc., and released
48278+# under the GPL v2 or higher
48279+
48280+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
48281+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
48282+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
48283+
48284+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
48285+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
48286+ gracl_learn.o grsec_log.o
48287+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
48288+
48289+ifdef CONFIG_NET
48290+obj-y += grsec_sock.o
48291+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
48292+endif
48293+
48294+ifndef CONFIG_GRKERNSEC
48295+obj-y += grsec_disabled.o
48296+endif
48297+
48298+ifdef CONFIG_GRKERNSEC_HIDESYM
48299+extra-y := grsec_hidesym.o
48300+$(obj)/grsec_hidesym.o:
48301+ @-chmod -f 500 /boot
48302+ @-chmod -f 500 /lib/modules
48303+ @-chmod -f 500 /lib64/modules
48304+ @-chmod -f 500 /lib32/modules
48305+ @-chmod -f 700 .
48306+ @echo ' grsec: protected kernel image paths'
48307+endif
48308diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
48309new file mode 100644
48310index 0000000..d3b423d
48311--- /dev/null
48312+++ b/grsecurity/gracl.c
48313@@ -0,0 +1,4155 @@
48314+#include <linux/kernel.h>
48315+#include <linux/module.h>
48316+#include <linux/sched.h>
48317+#include <linux/mm.h>
48318+#include <linux/file.h>
48319+#include <linux/fs.h>
48320+#include <linux/namei.h>
48321+#include <linux/mount.h>
48322+#include <linux/tty.h>
48323+#include <linux/proc_fs.h>
48324+#include <linux/lglock.h>
48325+#include <linux/slab.h>
48326+#include <linux/vmalloc.h>
48327+#include <linux/types.h>
48328+#include <linux/sysctl.h>
48329+#include <linux/netdevice.h>
48330+#include <linux/ptrace.h>
48331+#include <linux/gracl.h>
48332+#include <linux/gralloc.h>
48333+#include <linux/security.h>
48334+#include <linux/grinternal.h>
48335+#include <linux/pid_namespace.h>
48336+#include <linux/fdtable.h>
48337+#include <linux/percpu.h>
48338+
48339+#include <asm/uaccess.h>
48340+#include <asm/errno.h>
48341+#include <asm/mman.h>
48342+
48343+static struct acl_role_db acl_role_set;
48344+static struct name_db name_set;
48345+static struct inodev_db inodev_set;
48346+
48347+/* for keeping track of userspace pointers used for subjects, so we
48348+ can share references in the kernel as well
48349+*/
48350+
48351+static struct path real_root;
48352+
48353+static struct acl_subj_map_db subj_map_set;
48354+
48355+static struct acl_role_label *default_role;
48356+
48357+static struct acl_role_label *role_list;
48358+
48359+static u16 acl_sp_role_value;
48360+
48361+extern char *gr_shared_page[4];
48362+static DEFINE_MUTEX(gr_dev_mutex);
48363+DEFINE_RWLOCK(gr_inode_lock);
48364+
48365+struct gr_arg *gr_usermode;
48366+
48367+static unsigned int gr_status __read_only = GR_STATUS_INIT;
48368+
48369+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
48370+extern void gr_clear_learn_entries(void);
48371+
48372+#ifdef CONFIG_GRKERNSEC_RESLOG
48373+extern void gr_log_resource(const struct task_struct *task,
48374+ const int res, const unsigned long wanted, const int gt);
48375+#endif
48376+
48377+unsigned char *gr_system_salt;
48378+unsigned char *gr_system_sum;
48379+
48380+static struct sprole_pw **acl_special_roles = NULL;
48381+static __u16 num_sprole_pws = 0;
48382+
48383+static struct acl_role_label *kernel_role = NULL;
48384+
48385+static unsigned int gr_auth_attempts = 0;
48386+static unsigned long gr_auth_expires = 0UL;
48387+
48388+#ifdef CONFIG_NET
48389+extern struct vfsmount *sock_mnt;
48390+#endif
48391+
48392+extern struct vfsmount *pipe_mnt;
48393+extern struct vfsmount *shm_mnt;
48394+#ifdef CONFIG_HUGETLBFS
48395+extern struct vfsmount *hugetlbfs_vfsmount;
48396+#endif
48397+
48398+static struct acl_object_label *fakefs_obj_rw;
48399+static struct acl_object_label *fakefs_obj_rwx;
48400+
48401+extern int gr_init_uidset(void);
48402+extern void gr_free_uidset(void);
48403+extern void gr_remove_uid(uid_t uid);
48404+extern int gr_find_uid(uid_t uid);
48405+
48406+DECLARE_BRLOCK(vfsmount_lock);
48407+
48408+__inline__ int
48409+gr_acl_is_enabled(void)
48410+{
48411+ return (gr_status & GR_READY);
48412+}
48413+
48414+#ifdef CONFIG_BTRFS_FS
48415+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
48416+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
48417+#endif
48418+
48419+static inline dev_t __get_dev(const struct dentry *dentry)
48420+{
48421+#ifdef CONFIG_BTRFS_FS
48422+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
48423+ return get_btrfs_dev_from_inode(dentry->d_inode);
48424+ else
48425+#endif
48426+ return dentry->d_inode->i_sb->s_dev;
48427+}
48428+
48429+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48430+{
48431+ return __get_dev(dentry);
48432+}
48433+
48434+static char gr_task_roletype_to_char(struct task_struct *task)
48435+{
48436+ switch (task->role->roletype &
48437+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
48438+ GR_ROLE_SPECIAL)) {
48439+ case GR_ROLE_DEFAULT:
48440+ return 'D';
48441+ case GR_ROLE_USER:
48442+ return 'U';
48443+ case GR_ROLE_GROUP:
48444+ return 'G';
48445+ case GR_ROLE_SPECIAL:
48446+ return 'S';
48447+ }
48448+
48449+ return 'X';
48450+}
48451+
48452+char gr_roletype_to_char(void)
48453+{
48454+ return gr_task_roletype_to_char(current);
48455+}
48456+
48457+__inline__ int
48458+gr_acl_tpe_check(void)
48459+{
48460+ if (unlikely(!(gr_status & GR_READY)))
48461+ return 0;
48462+ if (current->role->roletype & GR_ROLE_TPE)
48463+ return 1;
48464+ else
48465+ return 0;
48466+}
48467+
48468+int
48469+gr_handle_rawio(const struct inode *inode)
48470+{
48471+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48472+ if (inode && S_ISBLK(inode->i_mode) &&
48473+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
48474+ !capable(CAP_SYS_RAWIO))
48475+ return 1;
48476+#endif
48477+ return 0;
48478+}
48479+
48480+static int
48481+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
48482+{
48483+ if (likely(lena != lenb))
48484+ return 0;
48485+
48486+ return !memcmp(a, b, lena);
48487+}
48488+
48489+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
48490+{
48491+ *buflen -= namelen;
48492+ if (*buflen < 0)
48493+ return -ENAMETOOLONG;
48494+ *buffer -= namelen;
48495+ memcpy(*buffer, str, namelen);
48496+ return 0;
48497+}
48498+
48499+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
48500+{
48501+ return prepend(buffer, buflen, name->name, name->len);
48502+}
48503+
48504+static int prepend_path(const struct path *path, struct path *root,
48505+ char **buffer, int *buflen)
48506+{
48507+ struct dentry *dentry = path->dentry;
48508+ struct vfsmount *vfsmnt = path->mnt;
48509+ bool slash = false;
48510+ int error = 0;
48511+
48512+ while (dentry != root->dentry || vfsmnt != root->mnt) {
48513+ struct dentry * parent;
48514+
48515+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
48516+ /* Global root? */
48517+ if (vfsmnt->mnt_parent == vfsmnt) {
48518+ goto out;
48519+ }
48520+ dentry = vfsmnt->mnt_mountpoint;
48521+ vfsmnt = vfsmnt->mnt_parent;
48522+ continue;
48523+ }
48524+ parent = dentry->d_parent;
48525+ prefetch(parent);
48526+ spin_lock(&dentry->d_lock);
48527+ error = prepend_name(buffer, buflen, &dentry->d_name);
48528+ spin_unlock(&dentry->d_lock);
48529+ if (!error)
48530+ error = prepend(buffer, buflen, "/", 1);
48531+ if (error)
48532+ break;
48533+
48534+ slash = true;
48535+ dentry = parent;
48536+ }
48537+
48538+out:
48539+ if (!error && !slash)
48540+ error = prepend(buffer, buflen, "/", 1);
48541+
48542+ return error;
48543+}
48544+
48545+/* this must be called with vfsmount_lock and rename_lock held */
48546+
48547+static char *__our_d_path(const struct path *path, struct path *root,
48548+ char *buf, int buflen)
48549+{
48550+ char *res = buf + buflen;
48551+ int error;
48552+
48553+ prepend(&res, &buflen, "\0", 1);
48554+ error = prepend_path(path, root, &res, &buflen);
48555+ if (error)
48556+ return ERR_PTR(error);
48557+
48558+ return res;
48559+}
48560+
48561+static char *
48562+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
48563+{
48564+ char *retval;
48565+
48566+ retval = __our_d_path(path, root, buf, buflen);
48567+ if (unlikely(IS_ERR(retval)))
48568+ retval = strcpy(buf, "<path too long>");
48569+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
48570+ retval[1] = '\0';
48571+
48572+ return retval;
48573+}
48574+
48575+static char *
48576+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48577+ char *buf, int buflen)
48578+{
48579+ struct path path;
48580+ char *res;
48581+
48582+ path.dentry = (struct dentry *)dentry;
48583+ path.mnt = (struct vfsmount *)vfsmnt;
48584+
48585+ /* we can use real_root.dentry, real_root.mnt, because this is only called
48586+ by the RBAC system */
48587+ res = gen_full_path(&path, &real_root, buf, buflen);
48588+
48589+ return res;
48590+}
48591+
48592+static char *
48593+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
48594+ char *buf, int buflen)
48595+{
48596+ char *res;
48597+ struct path path;
48598+ struct path root;
48599+ struct task_struct *reaper = &init_task;
48600+
48601+ path.dentry = (struct dentry *)dentry;
48602+ path.mnt = (struct vfsmount *)vfsmnt;
48603+
48604+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
48605+ get_fs_root(reaper->fs, &root);
48606+
48607+ write_seqlock(&rename_lock);
48608+ br_read_lock(vfsmount_lock);
48609+ res = gen_full_path(&path, &root, buf, buflen);
48610+ br_read_unlock(vfsmount_lock);
48611+ write_sequnlock(&rename_lock);
48612+
48613+ path_put(&root);
48614+ return res;
48615+}
48616+
48617+static char *
48618+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48619+{
48620+ char *ret;
48621+ write_seqlock(&rename_lock);
48622+ br_read_lock(vfsmount_lock);
48623+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48624+ PAGE_SIZE);
48625+ br_read_unlock(vfsmount_lock);
48626+ write_sequnlock(&rename_lock);
48627+ return ret;
48628+}
48629+
48630+static char *
48631+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
48632+{
48633+ char *ret;
48634+ char *buf;
48635+ int buflen;
48636+
48637+ write_seqlock(&rename_lock);
48638+ br_read_lock(vfsmount_lock);
48639+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48640+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
48641+ buflen = (int)(ret - buf);
48642+ if (buflen >= 5)
48643+ prepend(&ret, &buflen, "/proc", 5);
48644+ else
48645+ ret = strcpy(buf, "<path too long>");
48646+ br_read_unlock(vfsmount_lock);
48647+ write_sequnlock(&rename_lock);
48648+ return ret;
48649+}
48650+
48651+char *
48652+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
48653+{
48654+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
48655+ PAGE_SIZE);
48656+}
48657+
48658+char *
48659+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
48660+{
48661+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48662+ PAGE_SIZE);
48663+}
48664+
48665+char *
48666+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
48667+{
48668+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
48669+ PAGE_SIZE);
48670+}
48671+
48672+char *
48673+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
48674+{
48675+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
48676+ PAGE_SIZE);
48677+}
48678+
48679+char *
48680+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
48681+{
48682+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
48683+ PAGE_SIZE);
48684+}
48685+
48686+__inline__ __u32
48687+to_gr_audit(const __u32 reqmode)
48688+{
48689+ /* masks off auditable permission flags, then shifts them to create
48690+ auditing flags, and adds the special case of append auditing if
48691+ we're requesting write */
48692+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
48693+}
48694+
48695+struct acl_subject_label *
48696+lookup_subject_map(const struct acl_subject_label *userp)
48697+{
48698+ unsigned int index = shash(userp, subj_map_set.s_size);
48699+ struct subject_map *match;
48700+
48701+ match = subj_map_set.s_hash[index];
48702+
48703+ while (match && match->user != userp)
48704+ match = match->next;
48705+
48706+ if (match != NULL)
48707+ return match->kernel;
48708+ else
48709+ return NULL;
48710+}
48711+
48712+static void
48713+insert_subj_map_entry(struct subject_map *subjmap)
48714+{
48715+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
48716+ struct subject_map **curr;
48717+
48718+ subjmap->prev = NULL;
48719+
48720+ curr = &subj_map_set.s_hash[index];
48721+ if (*curr != NULL)
48722+ (*curr)->prev = subjmap;
48723+
48724+ subjmap->next = *curr;
48725+ *curr = subjmap;
48726+
48727+ return;
48728+}
48729+
48730+static struct acl_role_label *
48731+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
48732+ const gid_t gid)
48733+{
48734+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
48735+ struct acl_role_label *match;
48736+ struct role_allowed_ip *ipp;
48737+ unsigned int x;
48738+ u32 curr_ip = task->signal->curr_ip;
48739+
48740+ task->signal->saved_ip = curr_ip;
48741+
48742+ match = acl_role_set.r_hash[index];
48743+
48744+ while (match) {
48745+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
48746+ for (x = 0; x < match->domain_child_num; x++) {
48747+ if (match->domain_children[x] == uid)
48748+ goto found;
48749+ }
48750+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
48751+ break;
48752+ match = match->next;
48753+ }
48754+found:
48755+ if (match == NULL) {
48756+ try_group:
48757+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
48758+ match = acl_role_set.r_hash[index];
48759+
48760+ while (match) {
48761+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
48762+ for (x = 0; x < match->domain_child_num; x++) {
48763+ if (match->domain_children[x] == gid)
48764+ goto found2;
48765+ }
48766+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
48767+ break;
48768+ match = match->next;
48769+ }
48770+found2:
48771+ if (match == NULL)
48772+ match = default_role;
48773+ if (match->allowed_ips == NULL)
48774+ return match;
48775+ else {
48776+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48777+ if (likely
48778+ ((ntohl(curr_ip) & ipp->netmask) ==
48779+ (ntohl(ipp->addr) & ipp->netmask)))
48780+ return match;
48781+ }
48782+ match = default_role;
48783+ }
48784+ } else if (match->allowed_ips == NULL) {
48785+ return match;
48786+ } else {
48787+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
48788+ if (likely
48789+ ((ntohl(curr_ip) & ipp->netmask) ==
48790+ (ntohl(ipp->addr) & ipp->netmask)))
48791+ return match;
48792+ }
48793+ goto try_group;
48794+ }
48795+
48796+ return match;
48797+}
48798+
48799+struct acl_subject_label *
48800+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
48801+ const struct acl_role_label *role)
48802+{
48803+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48804+ struct acl_subject_label *match;
48805+
48806+ match = role->subj_hash[index];
48807+
48808+ while (match && (match->inode != ino || match->device != dev ||
48809+ (match->mode & GR_DELETED))) {
48810+ match = match->next;
48811+ }
48812+
48813+ if (match && !(match->mode & GR_DELETED))
48814+ return match;
48815+ else
48816+ return NULL;
48817+}
48818+
48819+struct acl_subject_label *
48820+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
48821+ const struct acl_role_label *role)
48822+{
48823+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
48824+ struct acl_subject_label *match;
48825+
48826+ match = role->subj_hash[index];
48827+
48828+ while (match && (match->inode != ino || match->device != dev ||
48829+ !(match->mode & GR_DELETED))) {
48830+ match = match->next;
48831+ }
48832+
48833+ if (match && (match->mode & GR_DELETED))
48834+ return match;
48835+ else
48836+ return NULL;
48837+}
48838+
48839+static struct acl_object_label *
48840+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
48841+ const struct acl_subject_label *subj)
48842+{
48843+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48844+ struct acl_object_label *match;
48845+
48846+ match = subj->obj_hash[index];
48847+
48848+ while (match && (match->inode != ino || match->device != dev ||
48849+ (match->mode & GR_DELETED))) {
48850+ match = match->next;
48851+ }
48852+
48853+ if (match && !(match->mode & GR_DELETED))
48854+ return match;
48855+ else
48856+ return NULL;
48857+}
48858+
48859+static struct acl_object_label *
48860+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
48861+ const struct acl_subject_label *subj)
48862+{
48863+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
48864+ struct acl_object_label *match;
48865+
48866+ match = subj->obj_hash[index];
48867+
48868+ while (match && (match->inode != ino || match->device != dev ||
48869+ !(match->mode & GR_DELETED))) {
48870+ match = match->next;
48871+ }
48872+
48873+ if (match && (match->mode & GR_DELETED))
48874+ return match;
48875+
48876+ match = subj->obj_hash[index];
48877+
48878+ while (match && (match->inode != ino || match->device != dev ||
48879+ (match->mode & GR_DELETED))) {
48880+ match = match->next;
48881+ }
48882+
48883+ if (match && !(match->mode & GR_DELETED))
48884+ return match;
48885+ else
48886+ return NULL;
48887+}
48888+
48889+static struct name_entry *
48890+lookup_name_entry(const char *name)
48891+{
48892+ unsigned int len = strlen(name);
48893+ unsigned int key = full_name_hash(name, len);
48894+ unsigned int index = key % name_set.n_size;
48895+ struct name_entry *match;
48896+
48897+ match = name_set.n_hash[index];
48898+
48899+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
48900+ match = match->next;
48901+
48902+ return match;
48903+}
48904+
48905+static struct name_entry *
48906+lookup_name_entry_create(const char *name)
48907+{
48908+ unsigned int len = strlen(name);
48909+ unsigned int key = full_name_hash(name, len);
48910+ unsigned int index = key % name_set.n_size;
48911+ struct name_entry *match;
48912+
48913+ match = name_set.n_hash[index];
48914+
48915+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48916+ !match->deleted))
48917+ match = match->next;
48918+
48919+ if (match && match->deleted)
48920+ return match;
48921+
48922+ match = name_set.n_hash[index];
48923+
48924+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
48925+ match->deleted))
48926+ match = match->next;
48927+
48928+ if (match && !match->deleted)
48929+ return match;
48930+ else
48931+ return NULL;
48932+}
48933+
48934+static struct inodev_entry *
48935+lookup_inodev_entry(const ino_t ino, const dev_t dev)
48936+{
48937+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
48938+ struct inodev_entry *match;
48939+
48940+ match = inodev_set.i_hash[index];
48941+
48942+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48943+ match = match->next;
48944+
48945+ return match;
48946+}
48947+
48948+static void
48949+insert_inodev_entry(struct inodev_entry *entry)
48950+{
48951+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48952+ inodev_set.i_size);
48953+ struct inodev_entry **curr;
48954+
48955+ entry->prev = NULL;
48956+
48957+ curr = &inodev_set.i_hash[index];
48958+ if (*curr != NULL)
48959+ (*curr)->prev = entry;
48960+
48961+ entry->next = *curr;
48962+ *curr = entry;
48963+
48964+ return;
48965+}
48966+
48967+static void
48968+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48969+{
48970+ unsigned int index =
48971+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48972+ struct acl_role_label **curr;
48973+ struct acl_role_label *tmp;
48974+
48975+ curr = &acl_role_set.r_hash[index];
48976+
48977+ /* if role was already inserted due to domains and already has
48978+ a role in the same bucket as it attached, then we need to
48979+ combine these two buckets
48980+ */
48981+ if (role->next) {
48982+ tmp = role->next;
48983+ while (tmp->next)
48984+ tmp = tmp->next;
48985+ tmp->next = *curr;
48986+ } else
48987+ role->next = *curr;
48988+ *curr = role;
48989+
48990+ return;
48991+}
48992+
48993+static void
48994+insert_acl_role_label(struct acl_role_label *role)
48995+{
48996+ int i;
48997+
48998+ if (role_list == NULL) {
48999+ role_list = role;
49000+ role->prev = NULL;
49001+ } else {
49002+ role->prev = role_list;
49003+ role_list = role;
49004+ }
49005+
49006+ /* used for hash chains */
49007+ role->next = NULL;
49008+
49009+ if (role->roletype & GR_ROLE_DOMAIN) {
49010+ for (i = 0; i < role->domain_child_num; i++)
49011+ __insert_acl_role_label(role, role->domain_children[i]);
49012+ } else
49013+ __insert_acl_role_label(role, role->uidgid);
49014+}
49015+
49016+static int
49017+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
49018+{
49019+ struct name_entry **curr, *nentry;
49020+ struct inodev_entry *ientry;
49021+ unsigned int len = strlen(name);
49022+ unsigned int key = full_name_hash(name, len);
49023+ unsigned int index = key % name_set.n_size;
49024+
49025+ curr = &name_set.n_hash[index];
49026+
49027+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
49028+ curr = &((*curr)->next);
49029+
49030+ if (*curr != NULL)
49031+ return 1;
49032+
49033+ nentry = acl_alloc(sizeof (struct name_entry));
49034+ if (nentry == NULL)
49035+ return 0;
49036+ ientry = acl_alloc(sizeof (struct inodev_entry));
49037+ if (ientry == NULL)
49038+ return 0;
49039+ ientry->nentry = nentry;
49040+
49041+ nentry->key = key;
49042+ nentry->name = name;
49043+ nentry->inode = inode;
49044+ nentry->device = device;
49045+ nentry->len = len;
49046+ nentry->deleted = deleted;
49047+
49048+ nentry->prev = NULL;
49049+ curr = &name_set.n_hash[index];
49050+ if (*curr != NULL)
49051+ (*curr)->prev = nentry;
49052+ nentry->next = *curr;
49053+ *curr = nentry;
49054+
49055+ /* insert us into the table searchable by inode/dev */
49056+ insert_inodev_entry(ientry);
49057+
49058+ return 1;
49059+}
49060+
49061+static void
49062+insert_acl_obj_label(struct acl_object_label *obj,
49063+ struct acl_subject_label *subj)
49064+{
49065+ unsigned int index =
49066+ fhash(obj->inode, obj->device, subj->obj_hash_size);
49067+ struct acl_object_label **curr;
49068+
49069+
49070+ obj->prev = NULL;
49071+
49072+ curr = &subj->obj_hash[index];
49073+ if (*curr != NULL)
49074+ (*curr)->prev = obj;
49075+
49076+ obj->next = *curr;
49077+ *curr = obj;
49078+
49079+ return;
49080+}
49081+
49082+static void
49083+insert_acl_subj_label(struct acl_subject_label *obj,
49084+ struct acl_role_label *role)
49085+{
49086+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
49087+ struct acl_subject_label **curr;
49088+
49089+ obj->prev = NULL;
49090+
49091+ curr = &role->subj_hash[index];
49092+ if (*curr != NULL)
49093+ (*curr)->prev = obj;
49094+
49095+ obj->next = *curr;
49096+ *curr = obj;
49097+
49098+ return;
49099+}
49100+
49101+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
49102+
49103+static void *
49104+create_table(__u32 * len, int elementsize)
49105+{
49106+ unsigned int table_sizes[] = {
49107+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
49108+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
49109+ 4194301, 8388593, 16777213, 33554393, 67108859
49110+ };
49111+ void *newtable = NULL;
49112+ unsigned int pwr = 0;
49113+
49114+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
49115+ table_sizes[pwr] <= *len)
49116+ pwr++;
49117+
49118+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
49119+ return newtable;
49120+
49121+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
49122+ newtable =
49123+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
49124+ else
49125+ newtable = vmalloc(table_sizes[pwr] * elementsize);
49126+
49127+ *len = table_sizes[pwr];
49128+
49129+ return newtable;
49130+}
49131+
49132+static int
49133+init_variables(const struct gr_arg *arg)
49134+{
49135+ struct task_struct *reaper = &init_task;
49136+ unsigned int stacksize;
49137+
49138+ subj_map_set.s_size = arg->role_db.num_subjects;
49139+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
49140+ name_set.n_size = arg->role_db.num_objects;
49141+ inodev_set.i_size = arg->role_db.num_objects;
49142+
49143+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
49144+ !name_set.n_size || !inodev_set.i_size)
49145+ return 1;
49146+
49147+ if (!gr_init_uidset())
49148+ return 1;
49149+
49150+ /* set up the stack that holds allocation info */
49151+
49152+ stacksize = arg->role_db.num_pointers + 5;
49153+
49154+ if (!acl_alloc_stack_init(stacksize))
49155+ return 1;
49156+
49157+ /* grab reference for the real root dentry and vfsmount */
49158+ get_fs_root(reaper->fs, &real_root);
49159+
49160+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49161+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
49162+#endif
49163+
49164+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
49165+ if (fakefs_obj_rw == NULL)
49166+ return 1;
49167+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
49168+
49169+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
49170+ if (fakefs_obj_rwx == NULL)
49171+ return 1;
49172+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
49173+
49174+ subj_map_set.s_hash =
49175+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
49176+ acl_role_set.r_hash =
49177+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
49178+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
49179+ inodev_set.i_hash =
49180+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
49181+
49182+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
49183+ !name_set.n_hash || !inodev_set.i_hash)
49184+ return 1;
49185+
49186+ memset(subj_map_set.s_hash, 0,
49187+ sizeof(struct subject_map *) * subj_map_set.s_size);
49188+ memset(acl_role_set.r_hash, 0,
49189+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
49190+ memset(name_set.n_hash, 0,
49191+ sizeof (struct name_entry *) * name_set.n_size);
49192+ memset(inodev_set.i_hash, 0,
49193+ sizeof (struct inodev_entry *) * inodev_set.i_size);
49194+
49195+ return 0;
49196+}
49197+
49198+/* free information not needed after startup
49199+ currently contains user->kernel pointer mappings for subjects
49200+*/
49201+
49202+static void
49203+free_init_variables(void)
49204+{
49205+ __u32 i;
49206+
49207+ if (subj_map_set.s_hash) {
49208+ for (i = 0; i < subj_map_set.s_size; i++) {
49209+ if (subj_map_set.s_hash[i]) {
49210+ kfree(subj_map_set.s_hash[i]);
49211+ subj_map_set.s_hash[i] = NULL;
49212+ }
49213+ }
49214+
49215+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
49216+ PAGE_SIZE)
49217+ kfree(subj_map_set.s_hash);
49218+ else
49219+ vfree(subj_map_set.s_hash);
49220+ }
49221+
49222+ return;
49223+}
49224+
49225+static void
49226+free_variables(void)
49227+{
49228+ struct acl_subject_label *s;
49229+ struct acl_role_label *r;
49230+ struct task_struct *task, *task2;
49231+ unsigned int x;
49232+
49233+ gr_clear_learn_entries();
49234+
49235+ read_lock(&tasklist_lock);
49236+ do_each_thread(task2, task) {
49237+ task->acl_sp_role = 0;
49238+ task->acl_role_id = 0;
49239+ task->acl = NULL;
49240+ task->role = NULL;
49241+ } while_each_thread(task2, task);
49242+ read_unlock(&tasklist_lock);
49243+
49244+ /* release the reference to the real root dentry and vfsmount */
49245+ path_put(&real_root);
49246+
49247+ /* free all object hash tables */
49248+
49249+ FOR_EACH_ROLE_START(r)
49250+ if (r->subj_hash == NULL)
49251+ goto next_role;
49252+ FOR_EACH_SUBJECT_START(r, s, x)
49253+ if (s->obj_hash == NULL)
49254+ break;
49255+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49256+ kfree(s->obj_hash);
49257+ else
49258+ vfree(s->obj_hash);
49259+ FOR_EACH_SUBJECT_END(s, x)
49260+ FOR_EACH_NESTED_SUBJECT_START(r, s)
49261+ if (s->obj_hash == NULL)
49262+ break;
49263+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
49264+ kfree(s->obj_hash);
49265+ else
49266+ vfree(s->obj_hash);
49267+ FOR_EACH_NESTED_SUBJECT_END(s)
49268+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
49269+ kfree(r->subj_hash);
49270+ else
49271+ vfree(r->subj_hash);
49272+ r->subj_hash = NULL;
49273+next_role:
49274+ FOR_EACH_ROLE_END(r)
49275+
49276+ acl_free_all();
49277+
49278+ if (acl_role_set.r_hash) {
49279+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
49280+ PAGE_SIZE)
49281+ kfree(acl_role_set.r_hash);
49282+ else
49283+ vfree(acl_role_set.r_hash);
49284+ }
49285+ if (name_set.n_hash) {
49286+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
49287+ PAGE_SIZE)
49288+ kfree(name_set.n_hash);
49289+ else
49290+ vfree(name_set.n_hash);
49291+ }
49292+
49293+ if (inodev_set.i_hash) {
49294+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
49295+ PAGE_SIZE)
49296+ kfree(inodev_set.i_hash);
49297+ else
49298+ vfree(inodev_set.i_hash);
49299+ }
49300+
49301+ gr_free_uidset();
49302+
49303+ memset(&name_set, 0, sizeof (struct name_db));
49304+ memset(&inodev_set, 0, sizeof (struct inodev_db));
49305+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
49306+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
49307+
49308+ default_role = NULL;
49309+ role_list = NULL;
49310+
49311+ return;
49312+}
49313+
49314+static __u32
49315+count_user_objs(struct acl_object_label *userp)
49316+{
49317+ struct acl_object_label o_tmp;
49318+ __u32 num = 0;
49319+
49320+ while (userp) {
49321+ if (copy_from_user(&o_tmp, userp,
49322+ sizeof (struct acl_object_label)))
49323+ break;
49324+
49325+ userp = o_tmp.prev;
49326+ num++;
49327+ }
49328+
49329+ return num;
49330+}
49331+
49332+static struct acl_subject_label *
49333+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
49334+
49335+static int
49336+copy_user_glob(struct acl_object_label *obj)
49337+{
49338+ struct acl_object_label *g_tmp, **guser;
49339+ unsigned int len;
49340+ char *tmp;
49341+
49342+ if (obj->globbed == NULL)
49343+ return 0;
49344+
49345+ guser = &obj->globbed;
49346+ while (*guser) {
49347+ g_tmp = (struct acl_object_label *)
49348+ acl_alloc(sizeof (struct acl_object_label));
49349+ if (g_tmp == NULL)
49350+ return -ENOMEM;
49351+
49352+ if (copy_from_user(g_tmp, *guser,
49353+ sizeof (struct acl_object_label)))
49354+ return -EFAULT;
49355+
49356+ len = strnlen_user(g_tmp->filename, PATH_MAX);
49357+
49358+ if (!len || len >= PATH_MAX)
49359+ return -EINVAL;
49360+
49361+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49362+ return -ENOMEM;
49363+
49364+ if (copy_from_user(tmp, g_tmp->filename, len))
49365+ return -EFAULT;
49366+ tmp[len-1] = '\0';
49367+ g_tmp->filename = tmp;
49368+
49369+ *guser = g_tmp;
49370+ guser = &(g_tmp->next);
49371+ }
49372+
49373+ return 0;
49374+}
49375+
49376+static int
49377+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
49378+ struct acl_role_label *role)
49379+{
49380+ struct acl_object_label *o_tmp;
49381+ unsigned int len;
49382+ int ret;
49383+ char *tmp;
49384+
49385+ while (userp) {
49386+ if ((o_tmp = (struct acl_object_label *)
49387+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
49388+ return -ENOMEM;
49389+
49390+ if (copy_from_user(o_tmp, userp,
49391+ sizeof (struct acl_object_label)))
49392+ return -EFAULT;
49393+
49394+ userp = o_tmp->prev;
49395+
49396+ len = strnlen_user(o_tmp->filename, PATH_MAX);
49397+
49398+ if (!len || len >= PATH_MAX)
49399+ return -EINVAL;
49400+
49401+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49402+ return -ENOMEM;
49403+
49404+ if (copy_from_user(tmp, o_tmp->filename, len))
49405+ return -EFAULT;
49406+ tmp[len-1] = '\0';
49407+ o_tmp->filename = tmp;
49408+
49409+ insert_acl_obj_label(o_tmp, subj);
49410+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
49411+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
49412+ return -ENOMEM;
49413+
49414+ ret = copy_user_glob(o_tmp);
49415+ if (ret)
49416+ return ret;
49417+
49418+ if (o_tmp->nested) {
49419+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
49420+ if (IS_ERR(o_tmp->nested))
49421+ return PTR_ERR(o_tmp->nested);
49422+
49423+ /* insert into nested subject list */
49424+ o_tmp->nested->next = role->hash->first;
49425+ role->hash->first = o_tmp->nested;
49426+ }
49427+ }
49428+
49429+ return 0;
49430+}
49431+
49432+static __u32
49433+count_user_subjs(struct acl_subject_label *userp)
49434+{
49435+ struct acl_subject_label s_tmp;
49436+ __u32 num = 0;
49437+
49438+ while (userp) {
49439+ if (copy_from_user(&s_tmp, userp,
49440+ sizeof (struct acl_subject_label)))
49441+ break;
49442+
49443+ userp = s_tmp.prev;
49444+ /* do not count nested subjects against this count, since
49445+ they are not included in the hash table, but are
49446+ attached to objects. We have already counted
49447+ the subjects in userspace for the allocation
49448+ stack
49449+ */
49450+ if (!(s_tmp.mode & GR_NESTED))
49451+ num++;
49452+ }
49453+
49454+ return num;
49455+}
49456+
49457+static int
49458+copy_user_allowedips(struct acl_role_label *rolep)
49459+{
49460+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
49461+
49462+ ruserip = rolep->allowed_ips;
49463+
49464+ while (ruserip) {
49465+ rlast = rtmp;
49466+
49467+ if ((rtmp = (struct role_allowed_ip *)
49468+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
49469+ return -ENOMEM;
49470+
49471+ if (copy_from_user(rtmp, ruserip,
49472+ sizeof (struct role_allowed_ip)))
49473+ return -EFAULT;
49474+
49475+ ruserip = rtmp->prev;
49476+
49477+ if (!rlast) {
49478+ rtmp->prev = NULL;
49479+ rolep->allowed_ips = rtmp;
49480+ } else {
49481+ rlast->next = rtmp;
49482+ rtmp->prev = rlast;
49483+ }
49484+
49485+ if (!ruserip)
49486+ rtmp->next = NULL;
49487+ }
49488+
49489+ return 0;
49490+}
49491+
49492+static int
49493+copy_user_transitions(struct acl_role_label *rolep)
49494+{
49495+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
49496+
49497+ unsigned int len;
49498+ char *tmp;
49499+
49500+ rusertp = rolep->transitions;
49501+
49502+ while (rusertp) {
49503+ rlast = rtmp;
49504+
49505+ if ((rtmp = (struct role_transition *)
49506+ acl_alloc(sizeof (struct role_transition))) == NULL)
49507+ return -ENOMEM;
49508+
49509+ if (copy_from_user(rtmp, rusertp,
49510+ sizeof (struct role_transition)))
49511+ return -EFAULT;
49512+
49513+ rusertp = rtmp->prev;
49514+
49515+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
49516+
49517+ if (!len || len >= GR_SPROLE_LEN)
49518+ return -EINVAL;
49519+
49520+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49521+ return -ENOMEM;
49522+
49523+ if (copy_from_user(tmp, rtmp->rolename, len))
49524+ return -EFAULT;
49525+ tmp[len-1] = '\0';
49526+ rtmp->rolename = tmp;
49527+
49528+ if (!rlast) {
49529+ rtmp->prev = NULL;
49530+ rolep->transitions = rtmp;
49531+ } else {
49532+ rlast->next = rtmp;
49533+ rtmp->prev = rlast;
49534+ }
49535+
49536+ if (!rusertp)
49537+ rtmp->next = NULL;
49538+ }
49539+
49540+ return 0;
49541+}
49542+
49543+static struct acl_subject_label *
49544+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
49545+{
49546+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
49547+ unsigned int len;
49548+ char *tmp;
49549+ __u32 num_objs;
49550+ struct acl_ip_label **i_tmp, *i_utmp2;
49551+ struct gr_hash_struct ghash;
49552+ struct subject_map *subjmap;
49553+ unsigned int i_num;
49554+ int err;
49555+
49556+ s_tmp = lookup_subject_map(userp);
49557+
49558+ /* we've already copied this subject into the kernel, just return
49559+ the reference to it, and don't copy it over again
49560+ */
49561+ if (s_tmp)
49562+ return(s_tmp);
49563+
49564+ if ((s_tmp = (struct acl_subject_label *)
49565+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
49566+ return ERR_PTR(-ENOMEM);
49567+
49568+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
49569+ if (subjmap == NULL)
49570+ return ERR_PTR(-ENOMEM);
49571+
49572+ subjmap->user = userp;
49573+ subjmap->kernel = s_tmp;
49574+ insert_subj_map_entry(subjmap);
49575+
49576+ if (copy_from_user(s_tmp, userp,
49577+ sizeof (struct acl_subject_label)))
49578+ return ERR_PTR(-EFAULT);
49579+
49580+ len = strnlen_user(s_tmp->filename, PATH_MAX);
49581+
49582+ if (!len || len >= PATH_MAX)
49583+ return ERR_PTR(-EINVAL);
49584+
49585+ if ((tmp = (char *) acl_alloc(len)) == NULL)
49586+ return ERR_PTR(-ENOMEM);
49587+
49588+ if (copy_from_user(tmp, s_tmp->filename, len))
49589+ return ERR_PTR(-EFAULT);
49590+ tmp[len-1] = '\0';
49591+ s_tmp->filename = tmp;
49592+
49593+ if (!strcmp(s_tmp->filename, "/"))
49594+ role->root_label = s_tmp;
49595+
49596+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
49597+ return ERR_PTR(-EFAULT);
49598+
49599+ /* copy user and group transition tables */
49600+
49601+ if (s_tmp->user_trans_num) {
49602+ uid_t *uidlist;
49603+
49604+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
49605+ if (uidlist == NULL)
49606+ return ERR_PTR(-ENOMEM);
49607+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
49608+ return ERR_PTR(-EFAULT);
49609+
49610+ s_tmp->user_transitions = uidlist;
49611+ }
49612+
49613+ if (s_tmp->group_trans_num) {
49614+ gid_t *gidlist;
49615+
49616+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
49617+ if (gidlist == NULL)
49618+ return ERR_PTR(-ENOMEM);
49619+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
49620+ return ERR_PTR(-EFAULT);
49621+
49622+ s_tmp->group_transitions = gidlist;
49623+ }
49624+
49625+ /* set up object hash table */
49626+ num_objs = count_user_objs(ghash.first);
49627+
49628+ s_tmp->obj_hash_size = num_objs;
49629+ s_tmp->obj_hash =
49630+ (struct acl_object_label **)
49631+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
49632+
49633+ if (!s_tmp->obj_hash)
49634+ return ERR_PTR(-ENOMEM);
49635+
49636+ memset(s_tmp->obj_hash, 0,
49637+ s_tmp->obj_hash_size *
49638+ sizeof (struct acl_object_label *));
49639+
49640+ /* add in objects */
49641+ err = copy_user_objs(ghash.first, s_tmp, role);
49642+
49643+ if (err)
49644+ return ERR_PTR(err);
49645+
49646+ /* set pointer for parent subject */
49647+ if (s_tmp->parent_subject) {
49648+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
49649+
49650+ if (IS_ERR(s_tmp2))
49651+ return s_tmp2;
49652+
49653+ s_tmp->parent_subject = s_tmp2;
49654+ }
49655+
49656+ /* add in ip acls */
49657+
49658+ if (!s_tmp->ip_num) {
49659+ s_tmp->ips = NULL;
49660+ goto insert;
49661+ }
49662+
49663+ i_tmp =
49664+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
49665+ sizeof (struct acl_ip_label *));
49666+
49667+ if (!i_tmp)
49668+ return ERR_PTR(-ENOMEM);
49669+
49670+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
49671+ *(i_tmp + i_num) =
49672+ (struct acl_ip_label *)
49673+ acl_alloc(sizeof (struct acl_ip_label));
49674+ if (!*(i_tmp + i_num))
49675+ return ERR_PTR(-ENOMEM);
49676+
49677+ if (copy_from_user
49678+ (&i_utmp2, s_tmp->ips + i_num,
49679+ sizeof (struct acl_ip_label *)))
49680+ return ERR_PTR(-EFAULT);
49681+
49682+ if (copy_from_user
49683+ (*(i_tmp + i_num), i_utmp2,
49684+ sizeof (struct acl_ip_label)))
49685+ return ERR_PTR(-EFAULT);
49686+
49687+ if ((*(i_tmp + i_num))->iface == NULL)
49688+ continue;
49689+
49690+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
49691+ if (!len || len >= IFNAMSIZ)
49692+ return ERR_PTR(-EINVAL);
49693+ tmp = acl_alloc(len);
49694+ if (tmp == NULL)
49695+ return ERR_PTR(-ENOMEM);
49696+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
49697+ return ERR_PTR(-EFAULT);
49698+ (*(i_tmp + i_num))->iface = tmp;
49699+ }
49700+
49701+ s_tmp->ips = i_tmp;
49702+
49703+insert:
49704+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
49705+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
49706+ return ERR_PTR(-ENOMEM);
49707+
49708+ return s_tmp;
49709+}
49710+
49711+static int
49712+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
49713+{
49714+ struct acl_subject_label s_pre;
49715+ struct acl_subject_label * ret;
49716+ int err;
49717+
49718+ while (userp) {
49719+ if (copy_from_user(&s_pre, userp,
49720+ sizeof (struct acl_subject_label)))
49721+ return -EFAULT;
49722+
49723+ /* do not add nested subjects here, add
49724+ while parsing objects
49725+ */
49726+
49727+ if (s_pre.mode & GR_NESTED) {
49728+ userp = s_pre.prev;
49729+ continue;
49730+ }
49731+
49732+ ret = do_copy_user_subj(userp, role);
49733+
49734+ err = PTR_ERR(ret);
49735+ if (IS_ERR(ret))
49736+ return err;
49737+
49738+ insert_acl_subj_label(ret, role);
49739+
49740+ userp = s_pre.prev;
49741+ }
49742+
49743+ return 0;
49744+}
49745+
49746+static int
49747+copy_user_acl(struct gr_arg *arg)
49748+{
49749+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
49750+ struct sprole_pw *sptmp;
49751+ struct gr_hash_struct *ghash;
49752+ uid_t *domainlist;
49753+ unsigned int r_num;
49754+ unsigned int len;
49755+ char *tmp;
49756+ int err = 0;
49757+ __u16 i;
49758+ __u32 num_subjs;
49759+
49760+ /* we need a default and kernel role */
49761+ if (arg->role_db.num_roles < 2)
49762+ return -EINVAL;
49763+
49764+ /* copy special role authentication info from userspace */
49765+
49766+ num_sprole_pws = arg->num_sprole_pws;
49767+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
49768+
49769+ if (!acl_special_roles) {
49770+ err = -ENOMEM;
49771+ goto cleanup;
49772+ }
49773+
49774+ for (i = 0; i < num_sprole_pws; i++) {
49775+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
49776+ if (!sptmp) {
49777+ err = -ENOMEM;
49778+ goto cleanup;
49779+ }
49780+ if (copy_from_user(sptmp, arg->sprole_pws + i,
49781+ sizeof (struct sprole_pw))) {
49782+ err = -EFAULT;
49783+ goto cleanup;
49784+ }
49785+
49786+ len =
49787+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
49788+
49789+ if (!len || len >= GR_SPROLE_LEN) {
49790+ err = -EINVAL;
49791+ goto cleanup;
49792+ }
49793+
49794+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49795+ err = -ENOMEM;
49796+ goto cleanup;
49797+ }
49798+
49799+ if (copy_from_user(tmp, sptmp->rolename, len)) {
49800+ err = -EFAULT;
49801+ goto cleanup;
49802+ }
49803+ tmp[len-1] = '\0';
49804+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49805+ printk(KERN_ALERT "Copying special role %s\n", tmp);
49806+#endif
49807+ sptmp->rolename = tmp;
49808+ acl_special_roles[i] = sptmp;
49809+ }
49810+
49811+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
49812+
49813+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
49814+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
49815+
49816+ if (!r_tmp) {
49817+ err = -ENOMEM;
49818+ goto cleanup;
49819+ }
49820+
49821+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
49822+ sizeof (struct acl_role_label *))) {
49823+ err = -EFAULT;
49824+ goto cleanup;
49825+ }
49826+
49827+ if (copy_from_user(r_tmp, r_utmp2,
49828+ sizeof (struct acl_role_label))) {
49829+ err = -EFAULT;
49830+ goto cleanup;
49831+ }
49832+
49833+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
49834+
49835+ if (!len || len >= PATH_MAX) {
49836+ err = -EINVAL;
49837+ goto cleanup;
49838+ }
49839+
49840+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
49841+ err = -ENOMEM;
49842+ goto cleanup;
49843+ }
49844+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
49845+ err = -EFAULT;
49846+ goto cleanup;
49847+ }
49848+ tmp[len-1] = '\0';
49849+ r_tmp->rolename = tmp;
49850+
49851+ if (!strcmp(r_tmp->rolename, "default")
49852+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
49853+ default_role = r_tmp;
49854+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
49855+ kernel_role = r_tmp;
49856+ }
49857+
49858+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
49859+ err = -ENOMEM;
49860+ goto cleanup;
49861+ }
49862+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
49863+ err = -EFAULT;
49864+ goto cleanup;
49865+ }
49866+
49867+ r_tmp->hash = ghash;
49868+
49869+ num_subjs = count_user_subjs(r_tmp->hash->first);
49870+
49871+ r_tmp->subj_hash_size = num_subjs;
49872+ r_tmp->subj_hash =
49873+ (struct acl_subject_label **)
49874+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
49875+
49876+ if (!r_tmp->subj_hash) {
49877+ err = -ENOMEM;
49878+ goto cleanup;
49879+ }
49880+
49881+ err = copy_user_allowedips(r_tmp);
49882+ if (err)
49883+ goto cleanup;
49884+
49885+ /* copy domain info */
49886+ if (r_tmp->domain_children != NULL) {
49887+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
49888+ if (domainlist == NULL) {
49889+ err = -ENOMEM;
49890+ goto cleanup;
49891+ }
49892+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
49893+ err = -EFAULT;
49894+ goto cleanup;
49895+ }
49896+ r_tmp->domain_children = domainlist;
49897+ }
49898+
49899+ err = copy_user_transitions(r_tmp);
49900+ if (err)
49901+ goto cleanup;
49902+
49903+ memset(r_tmp->subj_hash, 0,
49904+ r_tmp->subj_hash_size *
49905+ sizeof (struct acl_subject_label *));
49906+
49907+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
49908+
49909+ if (err)
49910+ goto cleanup;
49911+
49912+ /* set nested subject list to null */
49913+ r_tmp->hash->first = NULL;
49914+
49915+ insert_acl_role_label(r_tmp);
49916+ }
49917+
49918+ goto return_err;
49919+ cleanup:
49920+ free_variables();
49921+ return_err:
49922+ return err;
49923+
49924+}
49925+
49926+static int
49927+gracl_init(struct gr_arg *args)
49928+{
49929+ int error = 0;
49930+
49931+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49932+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49933+
49934+ if (init_variables(args)) {
49935+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49936+ error = -ENOMEM;
49937+ free_variables();
49938+ goto out;
49939+ }
49940+
49941+ error = copy_user_acl(args);
49942+ free_init_variables();
49943+ if (error) {
49944+ free_variables();
49945+ goto out;
49946+ }
49947+
49948+ if ((error = gr_set_acls(0))) {
49949+ free_variables();
49950+ goto out;
49951+ }
49952+
49953+ pax_open_kernel();
49954+ gr_status |= GR_READY;
49955+ pax_close_kernel();
49956+
49957+ out:
49958+ return error;
49959+}
49960+
49961+/* derived from glibc fnmatch() 0: match, 1: no match*/
49962+
49963+static int
49964+glob_match(const char *p, const char *n)
49965+{
49966+ char c;
49967+
49968+ while ((c = *p++) != '\0') {
49969+ switch (c) {
49970+ case '?':
49971+ if (*n == '\0')
49972+ return 1;
49973+ else if (*n == '/')
49974+ return 1;
49975+ break;
49976+ case '\\':
49977+ if (*n != c)
49978+ return 1;
49979+ break;
49980+ case '*':
49981+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49982+ if (*n == '/')
49983+ return 1;
49984+ else if (c == '?') {
49985+ if (*n == '\0')
49986+ return 1;
49987+ else
49988+ ++n;
49989+ }
49990+ }
49991+ if (c == '\0') {
49992+ return 0;
49993+ } else {
49994+ const char *endp;
49995+
49996+ if ((endp = strchr(n, '/')) == NULL)
49997+ endp = n + strlen(n);
49998+
49999+ if (c == '[') {
50000+ for (--p; n < endp; ++n)
50001+ if (!glob_match(p, n))
50002+ return 0;
50003+ } else if (c == '/') {
50004+ while (*n != '\0' && *n != '/')
50005+ ++n;
50006+ if (*n == '/' && !glob_match(p, n + 1))
50007+ return 0;
50008+ } else {
50009+ for (--p; n < endp; ++n)
50010+ if (*n == c && !glob_match(p, n))
50011+ return 0;
50012+ }
50013+
50014+ return 1;
50015+ }
50016+ case '[':
50017+ {
50018+ int not;
50019+ char cold;
50020+
50021+ if (*n == '\0' || *n == '/')
50022+ return 1;
50023+
50024+ not = (*p == '!' || *p == '^');
50025+ if (not)
50026+ ++p;
50027+
50028+ c = *p++;
50029+ for (;;) {
50030+ unsigned char fn = (unsigned char)*n;
50031+
50032+ if (c == '\0')
50033+ return 1;
50034+ else {
50035+ if (c == fn)
50036+ goto matched;
50037+ cold = c;
50038+ c = *p++;
50039+
50040+ if (c == '-' && *p != ']') {
50041+ unsigned char cend = *p++;
50042+
50043+ if (cend == '\0')
50044+ return 1;
50045+
50046+ if (cold <= fn && fn <= cend)
50047+ goto matched;
50048+
50049+ c = *p++;
50050+ }
50051+ }
50052+
50053+ if (c == ']')
50054+ break;
50055+ }
50056+ if (!not)
50057+ return 1;
50058+ break;
50059+ matched:
50060+ while (c != ']') {
50061+ if (c == '\0')
50062+ return 1;
50063+
50064+ c = *p++;
50065+ }
50066+ if (not)
50067+ return 1;
50068+ }
50069+ break;
50070+ default:
50071+ if (c != *n)
50072+ return 1;
50073+ }
50074+
50075+ ++n;
50076+ }
50077+
50078+ if (*n == '\0')
50079+ return 0;
50080+
50081+ if (*n == '/')
50082+ return 0;
50083+
50084+ return 1;
50085+}
50086+
50087+static struct acl_object_label *
50088+chk_glob_label(struct acl_object_label *globbed,
50089+ struct dentry *dentry, struct vfsmount *mnt, char **path)
50090+{
50091+ struct acl_object_label *tmp;
50092+
50093+ if (*path == NULL)
50094+ *path = gr_to_filename_nolock(dentry, mnt);
50095+
50096+ tmp = globbed;
50097+
50098+ while (tmp) {
50099+ if (!glob_match(tmp->filename, *path))
50100+ return tmp;
50101+ tmp = tmp->next;
50102+ }
50103+
50104+ return NULL;
50105+}
50106+
50107+static struct acl_object_label *
50108+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50109+ const ino_t curr_ino, const dev_t curr_dev,
50110+ const struct acl_subject_label *subj, char **path, const int checkglob)
50111+{
50112+ struct acl_subject_label *tmpsubj;
50113+ struct acl_object_label *retval;
50114+ struct acl_object_label *retval2;
50115+
50116+ tmpsubj = (struct acl_subject_label *) subj;
50117+ read_lock(&gr_inode_lock);
50118+ do {
50119+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
50120+ if (retval) {
50121+ if (checkglob && retval->globbed) {
50122+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
50123+ (struct vfsmount *)orig_mnt, path);
50124+ if (retval2)
50125+ retval = retval2;
50126+ }
50127+ break;
50128+ }
50129+ } while ((tmpsubj = tmpsubj->parent_subject));
50130+ read_unlock(&gr_inode_lock);
50131+
50132+ return retval;
50133+}
50134+
50135+static __inline__ struct acl_object_label *
50136+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
50137+ struct dentry *curr_dentry,
50138+ const struct acl_subject_label *subj, char **path, const int checkglob)
50139+{
50140+ int newglob = checkglob;
50141+ ino_t inode;
50142+ dev_t device;
50143+
50144+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
50145+ as we don't want a / * rule to match instead of the / object
50146+ don't do this for create lookups that call this function though, since they're looking up
50147+ on the parent and thus need globbing checks on all paths
50148+ */
50149+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
50150+ newglob = GR_NO_GLOB;
50151+
50152+ spin_lock(&curr_dentry->d_lock);
50153+ inode = curr_dentry->d_inode->i_ino;
50154+ device = __get_dev(curr_dentry);
50155+ spin_unlock(&curr_dentry->d_lock);
50156+
50157+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
50158+}
50159+
50160+static struct acl_object_label *
50161+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50162+ const struct acl_subject_label *subj, char *path, const int checkglob)
50163+{
50164+ struct dentry *dentry = (struct dentry *) l_dentry;
50165+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50166+ struct acl_object_label *retval;
50167+ struct dentry *parent;
50168+
50169+ write_seqlock(&rename_lock);
50170+ br_read_lock(vfsmount_lock);
50171+
50172+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
50173+#ifdef CONFIG_NET
50174+ mnt == sock_mnt ||
50175+#endif
50176+#ifdef CONFIG_HUGETLBFS
50177+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
50178+#endif
50179+ /* ignore Eric Biederman */
50180+ IS_PRIVATE(l_dentry->d_inode))) {
50181+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
50182+ goto out;
50183+ }
50184+
50185+ for (;;) {
50186+ if (dentry == real_root.dentry && mnt == real_root.mnt)
50187+ break;
50188+
50189+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50190+ if (mnt->mnt_parent == mnt)
50191+ break;
50192+
50193+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50194+ if (retval != NULL)
50195+ goto out;
50196+
50197+ dentry = mnt->mnt_mountpoint;
50198+ mnt = mnt->mnt_parent;
50199+ continue;
50200+ }
50201+
50202+ parent = dentry->d_parent;
50203+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50204+ if (retval != NULL)
50205+ goto out;
50206+
50207+ dentry = parent;
50208+ }
50209+
50210+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
50211+
50212+ /* real_root is pinned so we don't have to hold a reference */
50213+ if (retval == NULL)
50214+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
50215+out:
50216+ br_read_unlock(vfsmount_lock);
50217+ write_sequnlock(&rename_lock);
50218+
50219+ BUG_ON(retval == NULL);
50220+
50221+ return retval;
50222+}
50223+
50224+static __inline__ struct acl_object_label *
50225+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50226+ const struct acl_subject_label *subj)
50227+{
50228+ char *path = NULL;
50229+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
50230+}
50231+
50232+static __inline__ struct acl_object_label *
50233+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50234+ const struct acl_subject_label *subj)
50235+{
50236+ char *path = NULL;
50237+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
50238+}
50239+
50240+static __inline__ struct acl_object_label *
50241+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50242+ const struct acl_subject_label *subj, char *path)
50243+{
50244+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
50245+}
50246+
50247+static struct acl_subject_label *
50248+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
50249+ const struct acl_role_label *role)
50250+{
50251+ struct dentry *dentry = (struct dentry *) l_dentry;
50252+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
50253+ struct acl_subject_label *retval;
50254+ struct dentry *parent;
50255+
50256+ write_seqlock(&rename_lock);
50257+ br_read_lock(vfsmount_lock);
50258+
50259+ for (;;) {
50260+ if (dentry == real_root.dentry && mnt == real_root.mnt)
50261+ break;
50262+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
50263+ if (mnt->mnt_parent == mnt)
50264+ break;
50265+
50266+ spin_lock(&dentry->d_lock);
50267+ read_lock(&gr_inode_lock);
50268+ retval =
50269+ lookup_acl_subj_label(dentry->d_inode->i_ino,
50270+ __get_dev(dentry), role);
50271+ read_unlock(&gr_inode_lock);
50272+ spin_unlock(&dentry->d_lock);
50273+ if (retval != NULL)
50274+ goto out;
50275+
50276+ dentry = mnt->mnt_mountpoint;
50277+ mnt = mnt->mnt_parent;
50278+ continue;
50279+ }
50280+
50281+ spin_lock(&dentry->d_lock);
50282+ read_lock(&gr_inode_lock);
50283+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50284+ __get_dev(dentry), role);
50285+ read_unlock(&gr_inode_lock);
50286+ parent = dentry->d_parent;
50287+ spin_unlock(&dentry->d_lock);
50288+
50289+ if (retval != NULL)
50290+ goto out;
50291+
50292+ dentry = parent;
50293+ }
50294+
50295+ spin_lock(&dentry->d_lock);
50296+ read_lock(&gr_inode_lock);
50297+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
50298+ __get_dev(dentry), role);
50299+ read_unlock(&gr_inode_lock);
50300+ spin_unlock(&dentry->d_lock);
50301+
50302+ if (unlikely(retval == NULL)) {
50303+ /* real_root is pinned, we don't need to hold a reference */
50304+ read_lock(&gr_inode_lock);
50305+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
50306+ __get_dev(real_root.dentry), role);
50307+ read_unlock(&gr_inode_lock);
50308+ }
50309+out:
50310+ br_read_unlock(vfsmount_lock);
50311+ write_sequnlock(&rename_lock);
50312+
50313+ BUG_ON(retval == NULL);
50314+
50315+ return retval;
50316+}
50317+
50318+static void
50319+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
50320+{
50321+ struct task_struct *task = current;
50322+ const struct cred *cred = current_cred();
50323+
50324+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50325+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50326+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50327+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
50328+
50329+ return;
50330+}
50331+
50332+static void
50333+gr_log_learn_sysctl(const char *path, const __u32 mode)
50334+{
50335+ struct task_struct *task = current;
50336+ const struct cred *cred = current_cred();
50337+
50338+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
50339+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50340+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50341+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
50342+
50343+ return;
50344+}
50345+
50346+static void
50347+gr_log_learn_id_change(const char type, const unsigned int real,
50348+ const unsigned int effective, const unsigned int fs)
50349+{
50350+ struct task_struct *task = current;
50351+ const struct cred *cred = current_cred();
50352+
50353+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
50354+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
50355+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
50356+ type, real, effective, fs, &task->signal->saved_ip);
50357+
50358+ return;
50359+}
50360+
50361+__u32
50362+gr_search_file(const struct dentry * dentry, const __u32 mode,
50363+ const struct vfsmount * mnt)
50364+{
50365+ __u32 retval = mode;
50366+ struct acl_subject_label *curracl;
50367+ struct acl_object_label *currobj;
50368+
50369+ if (unlikely(!(gr_status & GR_READY)))
50370+ return (mode & ~GR_AUDITS);
50371+
50372+ curracl = current->acl;
50373+
50374+ currobj = chk_obj_label(dentry, mnt, curracl);
50375+ retval = currobj->mode & mode;
50376+
50377+ /* if we're opening a specified transfer file for writing
50378+ (e.g. /dev/initctl), then transfer our role to init
50379+ */
50380+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
50381+ current->role->roletype & GR_ROLE_PERSIST)) {
50382+ struct task_struct *task = init_pid_ns.child_reaper;
50383+
50384+ if (task->role != current->role) {
50385+ task->acl_sp_role = 0;
50386+ task->acl_role_id = current->acl_role_id;
50387+ task->role = current->role;
50388+ rcu_read_lock();
50389+ read_lock(&grsec_exec_file_lock);
50390+ gr_apply_subject_to_task(task);
50391+ read_unlock(&grsec_exec_file_lock);
50392+ rcu_read_unlock();
50393+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
50394+ }
50395+ }
50396+
50397+ if (unlikely
50398+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
50399+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
50400+ __u32 new_mode = mode;
50401+
50402+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50403+
50404+ retval = new_mode;
50405+
50406+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
50407+ new_mode |= GR_INHERIT;
50408+
50409+ if (!(mode & GR_NOLEARN))
50410+ gr_log_learn(dentry, mnt, new_mode);
50411+ }
50412+
50413+ return retval;
50414+}
50415+
50416+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
50417+ const struct dentry *parent,
50418+ const struct vfsmount *mnt)
50419+{
50420+ struct name_entry *match;
50421+ struct acl_object_label *matchpo;
50422+ struct acl_subject_label *curracl;
50423+ char *path;
50424+
50425+ if (unlikely(!(gr_status & GR_READY)))
50426+ return NULL;
50427+
50428+ preempt_disable();
50429+ path = gr_to_filename_rbac(new_dentry, mnt);
50430+ match = lookup_name_entry_create(path);
50431+
50432+ curracl = current->acl;
50433+
50434+ if (match) {
50435+ read_lock(&gr_inode_lock);
50436+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
50437+ read_unlock(&gr_inode_lock);
50438+
50439+ if (matchpo) {
50440+ preempt_enable();
50441+ return matchpo;
50442+ }
50443+ }
50444+
50445+ // lookup parent
50446+
50447+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
50448+
50449+ preempt_enable();
50450+ return matchpo;
50451+}
50452+
50453+__u32
50454+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
50455+ const struct vfsmount * mnt, const __u32 mode)
50456+{
50457+ struct acl_object_label *matchpo;
50458+ __u32 retval;
50459+
50460+ if (unlikely(!(gr_status & GR_READY)))
50461+ return (mode & ~GR_AUDITS);
50462+
50463+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
50464+
50465+ retval = matchpo->mode & mode;
50466+
50467+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
50468+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50469+ __u32 new_mode = mode;
50470+
50471+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
50472+
50473+ gr_log_learn(new_dentry, mnt, new_mode);
50474+ return new_mode;
50475+ }
50476+
50477+ return retval;
50478+}
50479+
50480+__u32
50481+gr_check_link(const struct dentry * new_dentry,
50482+ const struct dentry * parent_dentry,
50483+ const struct vfsmount * parent_mnt,
50484+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
50485+{
50486+ struct acl_object_label *obj;
50487+ __u32 oldmode, newmode;
50488+ __u32 needmode;
50489+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
50490+ GR_DELETE | GR_INHERIT;
50491+
50492+ if (unlikely(!(gr_status & GR_READY)))
50493+ return (GR_CREATE | GR_LINK);
50494+
50495+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
50496+ oldmode = obj->mode;
50497+
50498+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
50499+ newmode = obj->mode;
50500+
50501+ needmode = newmode & checkmodes;
50502+
50503+ // old name for hardlink must have at least the permissions of the new name
50504+ if ((oldmode & needmode) != needmode)
50505+ goto bad;
50506+
50507+ // if old name had restrictions/auditing, make sure the new name does as well
50508+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
50509+
50510+ // don't allow hardlinking of suid/sgid files without permission
50511+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50512+ needmode |= GR_SETID;
50513+
50514+ if ((newmode & needmode) != needmode)
50515+ goto bad;
50516+
50517+ // enforce minimum permissions
50518+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
50519+ return newmode;
50520+bad:
50521+ needmode = oldmode;
50522+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
50523+ needmode |= GR_SETID;
50524+
50525+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
50526+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
50527+ return (GR_CREATE | GR_LINK);
50528+ } else if (newmode & GR_SUPPRESS)
50529+ return GR_SUPPRESS;
50530+ else
50531+ return 0;
50532+}
50533+
50534+int
50535+gr_check_hidden_task(const struct task_struct *task)
50536+{
50537+ if (unlikely(!(gr_status & GR_READY)))
50538+ return 0;
50539+
50540+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
50541+ return 1;
50542+
50543+ return 0;
50544+}
50545+
50546+int
50547+gr_check_protected_task(const struct task_struct *task)
50548+{
50549+ if (unlikely(!(gr_status & GR_READY) || !task))
50550+ return 0;
50551+
50552+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50553+ task->acl != current->acl)
50554+ return 1;
50555+
50556+ return 0;
50557+}
50558+
50559+int
50560+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50561+{
50562+ struct task_struct *p;
50563+ int ret = 0;
50564+
50565+ if (unlikely(!(gr_status & GR_READY) || !pid))
50566+ return ret;
50567+
50568+ read_lock(&tasklist_lock);
50569+ do_each_pid_task(pid, type, p) {
50570+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
50571+ p->acl != current->acl) {
50572+ ret = 1;
50573+ goto out;
50574+ }
50575+ } while_each_pid_task(pid, type, p);
50576+out:
50577+ read_unlock(&tasklist_lock);
50578+
50579+ return ret;
50580+}
50581+
50582+void
50583+gr_copy_label(struct task_struct *tsk)
50584+{
50585+ tsk->signal->used_accept = 0;
50586+ tsk->acl_sp_role = 0;
50587+ tsk->acl_role_id = current->acl_role_id;
50588+ tsk->acl = current->acl;
50589+ tsk->role = current->role;
50590+ tsk->signal->curr_ip = current->signal->curr_ip;
50591+ tsk->signal->saved_ip = current->signal->saved_ip;
50592+ if (current->exec_file)
50593+ get_file(current->exec_file);
50594+ tsk->exec_file = current->exec_file;
50595+ tsk->is_writable = current->is_writable;
50596+ if (unlikely(current->signal->used_accept)) {
50597+ current->signal->curr_ip = 0;
50598+ current->signal->saved_ip = 0;
50599+ }
50600+
50601+ return;
50602+}
50603+
50604+static void
50605+gr_set_proc_res(struct task_struct *task)
50606+{
50607+ struct acl_subject_label *proc;
50608+ unsigned short i;
50609+
50610+ proc = task->acl;
50611+
50612+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
50613+ return;
50614+
50615+ for (i = 0; i < RLIM_NLIMITS; i++) {
50616+ if (!(proc->resmask & (1 << i)))
50617+ continue;
50618+
50619+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
50620+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
50621+ }
50622+
50623+ return;
50624+}
50625+
50626+extern int __gr_process_user_ban(struct user_struct *user);
50627+
50628+int
50629+gr_check_user_change(int real, int effective, int fs)
50630+{
50631+ unsigned int i;
50632+ __u16 num;
50633+ uid_t *uidlist;
50634+ int curuid;
50635+ int realok = 0;
50636+ int effectiveok = 0;
50637+ int fsok = 0;
50638+
50639+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
50640+ struct user_struct *user;
50641+
50642+ if (real == -1)
50643+ goto skipit;
50644+
50645+ user = find_user(real);
50646+ if (user == NULL)
50647+ goto skipit;
50648+
50649+ if (__gr_process_user_ban(user)) {
50650+ /* for find_user */
50651+ free_uid(user);
50652+ return 1;
50653+ }
50654+
50655+ /* for find_user */
50656+ free_uid(user);
50657+
50658+skipit:
50659+#endif
50660+
50661+ if (unlikely(!(gr_status & GR_READY)))
50662+ return 0;
50663+
50664+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50665+ gr_log_learn_id_change('u', real, effective, fs);
50666+
50667+ num = current->acl->user_trans_num;
50668+ uidlist = current->acl->user_transitions;
50669+
50670+ if (uidlist == NULL)
50671+ return 0;
50672+
50673+ if (real == -1)
50674+ realok = 1;
50675+ if (effective == -1)
50676+ effectiveok = 1;
50677+ if (fs == -1)
50678+ fsok = 1;
50679+
50680+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
50681+ for (i = 0; i < num; i++) {
50682+ curuid = (int)uidlist[i];
50683+ if (real == curuid)
50684+ realok = 1;
50685+ if (effective == curuid)
50686+ effectiveok = 1;
50687+ if (fs == curuid)
50688+ fsok = 1;
50689+ }
50690+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
50691+ for (i = 0; i < num; i++) {
50692+ curuid = (int)uidlist[i];
50693+ if (real == curuid)
50694+ break;
50695+ if (effective == curuid)
50696+ break;
50697+ if (fs == curuid)
50698+ break;
50699+ }
50700+ /* not in deny list */
50701+ if (i == num) {
50702+ realok = 1;
50703+ effectiveok = 1;
50704+ fsok = 1;
50705+ }
50706+ }
50707+
50708+ if (realok && effectiveok && fsok)
50709+ return 0;
50710+ else {
50711+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50712+ return 1;
50713+ }
50714+}
50715+
50716+int
50717+gr_check_group_change(int real, int effective, int fs)
50718+{
50719+ unsigned int i;
50720+ __u16 num;
50721+ gid_t *gidlist;
50722+ int curgid;
50723+ int realok = 0;
50724+ int effectiveok = 0;
50725+ int fsok = 0;
50726+
50727+ if (unlikely(!(gr_status & GR_READY)))
50728+ return 0;
50729+
50730+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
50731+ gr_log_learn_id_change('g', real, effective, fs);
50732+
50733+ num = current->acl->group_trans_num;
50734+ gidlist = current->acl->group_transitions;
50735+
50736+ if (gidlist == NULL)
50737+ return 0;
50738+
50739+ if (real == -1)
50740+ realok = 1;
50741+ if (effective == -1)
50742+ effectiveok = 1;
50743+ if (fs == -1)
50744+ fsok = 1;
50745+
50746+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
50747+ for (i = 0; i < num; i++) {
50748+ curgid = (int)gidlist[i];
50749+ if (real == curgid)
50750+ realok = 1;
50751+ if (effective == curgid)
50752+ effectiveok = 1;
50753+ if (fs == curgid)
50754+ fsok = 1;
50755+ }
50756+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
50757+ for (i = 0; i < num; i++) {
50758+ curgid = (int)gidlist[i];
50759+ if (real == curgid)
50760+ break;
50761+ if (effective == curgid)
50762+ break;
50763+ if (fs == curgid)
50764+ break;
50765+ }
50766+ /* not in deny list */
50767+ if (i == num) {
50768+ realok = 1;
50769+ effectiveok = 1;
50770+ fsok = 1;
50771+ }
50772+ }
50773+
50774+ if (realok && effectiveok && fsok)
50775+ return 0;
50776+ else {
50777+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
50778+ return 1;
50779+ }
50780+}
50781+
50782+void
50783+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
50784+{
50785+ struct acl_role_label *role = task->role;
50786+ struct acl_subject_label *subj = NULL;
50787+ struct acl_object_label *obj;
50788+ struct file *filp;
50789+
50790+ if (unlikely(!(gr_status & GR_READY)))
50791+ return;
50792+
50793+ filp = task->exec_file;
50794+
50795+ /* kernel process, we'll give them the kernel role */
50796+ if (unlikely(!filp)) {
50797+ task->role = kernel_role;
50798+ task->acl = kernel_role->root_label;
50799+ return;
50800+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
50801+ role = lookup_acl_role_label(task, uid, gid);
50802+
50803+ /* perform subject lookup in possibly new role
50804+ we can use this result below in the case where role == task->role
50805+ */
50806+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
50807+
50808+ /* if we changed uid/gid, but result in the same role
50809+ and are using inheritance, don't lose the inherited subject
50810+ if current subject is other than what normal lookup
50811+ would result in, we arrived via inheritance, don't
50812+ lose subject
50813+ */
50814+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
50815+ (subj == task->acl)))
50816+ task->acl = subj;
50817+
50818+ task->role = role;
50819+
50820+ task->is_writable = 0;
50821+
50822+ /* ignore additional mmap checks for processes that are writable
50823+ by the default ACL */
50824+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50825+ if (unlikely(obj->mode & GR_WRITE))
50826+ task->is_writable = 1;
50827+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50828+ if (unlikely(obj->mode & GR_WRITE))
50829+ task->is_writable = 1;
50830+
50831+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50832+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50833+#endif
50834+
50835+ gr_set_proc_res(task);
50836+
50837+ return;
50838+}
50839+
50840+int
50841+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50842+ const int unsafe_flags)
50843+{
50844+ struct task_struct *task = current;
50845+ struct acl_subject_label *newacl;
50846+ struct acl_object_label *obj;
50847+ __u32 retmode;
50848+
50849+ if (unlikely(!(gr_status & GR_READY)))
50850+ return 0;
50851+
50852+ newacl = chk_subj_label(dentry, mnt, task->role);
50853+
50854+ task_lock(task);
50855+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
50856+ !(task->role->roletype & GR_ROLE_GOD) &&
50857+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
50858+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
50859+ task_unlock(task);
50860+ if (unsafe_flags & LSM_UNSAFE_SHARE)
50861+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
50862+ else
50863+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
50864+ return -EACCES;
50865+ }
50866+ task_unlock(task);
50867+
50868+ obj = chk_obj_label(dentry, mnt, task->acl);
50869+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
50870+
50871+ if (!(task->acl->mode & GR_INHERITLEARN) &&
50872+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
50873+ if (obj->nested)
50874+ task->acl = obj->nested;
50875+ else
50876+ task->acl = newacl;
50877+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
50878+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
50879+
50880+ task->is_writable = 0;
50881+
50882+ /* ignore additional mmap checks for processes that are writable
50883+ by the default ACL */
50884+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
50885+ if (unlikely(obj->mode & GR_WRITE))
50886+ task->is_writable = 1;
50887+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
50888+ if (unlikely(obj->mode & GR_WRITE))
50889+ task->is_writable = 1;
50890+
50891+ gr_set_proc_res(task);
50892+
50893+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50894+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50895+#endif
50896+ return 0;
50897+}
50898+
50899+/* always called with valid inodev ptr */
50900+static void
50901+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
50902+{
50903+ struct acl_object_label *matchpo;
50904+ struct acl_subject_label *matchps;
50905+ struct acl_subject_label *subj;
50906+ struct acl_role_label *role;
50907+ unsigned int x;
50908+
50909+ FOR_EACH_ROLE_START(role)
50910+ FOR_EACH_SUBJECT_START(role, subj, x)
50911+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
50912+ matchpo->mode |= GR_DELETED;
50913+ FOR_EACH_SUBJECT_END(subj,x)
50914+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50915+ if (subj->inode == ino && subj->device == dev)
50916+ subj->mode |= GR_DELETED;
50917+ FOR_EACH_NESTED_SUBJECT_END(subj)
50918+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
50919+ matchps->mode |= GR_DELETED;
50920+ FOR_EACH_ROLE_END(role)
50921+
50922+ inodev->nentry->deleted = 1;
50923+
50924+ return;
50925+}
50926+
50927+void
50928+gr_handle_delete(const ino_t ino, const dev_t dev)
50929+{
50930+ struct inodev_entry *inodev;
50931+
50932+ if (unlikely(!(gr_status & GR_READY)))
50933+ return;
50934+
50935+ write_lock(&gr_inode_lock);
50936+ inodev = lookup_inodev_entry(ino, dev);
50937+ if (inodev != NULL)
50938+ do_handle_delete(inodev, ino, dev);
50939+ write_unlock(&gr_inode_lock);
50940+
50941+ return;
50942+}
50943+
50944+static void
50945+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50946+ const ino_t newinode, const dev_t newdevice,
50947+ struct acl_subject_label *subj)
50948+{
50949+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50950+ struct acl_object_label *match;
50951+
50952+ match = subj->obj_hash[index];
50953+
50954+ while (match && (match->inode != oldinode ||
50955+ match->device != olddevice ||
50956+ !(match->mode & GR_DELETED)))
50957+ match = match->next;
50958+
50959+ if (match && (match->inode == oldinode)
50960+ && (match->device == olddevice)
50961+ && (match->mode & GR_DELETED)) {
50962+ if (match->prev == NULL) {
50963+ subj->obj_hash[index] = match->next;
50964+ if (match->next != NULL)
50965+ match->next->prev = NULL;
50966+ } else {
50967+ match->prev->next = match->next;
50968+ if (match->next != NULL)
50969+ match->next->prev = match->prev;
50970+ }
50971+ match->prev = NULL;
50972+ match->next = NULL;
50973+ match->inode = newinode;
50974+ match->device = newdevice;
50975+ match->mode &= ~GR_DELETED;
50976+
50977+ insert_acl_obj_label(match, subj);
50978+ }
50979+
50980+ return;
50981+}
50982+
50983+static void
50984+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50985+ const ino_t newinode, const dev_t newdevice,
50986+ struct acl_role_label *role)
50987+{
50988+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50989+ struct acl_subject_label *match;
50990+
50991+ match = role->subj_hash[index];
50992+
50993+ while (match && (match->inode != oldinode ||
50994+ match->device != olddevice ||
50995+ !(match->mode & GR_DELETED)))
50996+ match = match->next;
50997+
50998+ if (match && (match->inode == oldinode)
50999+ && (match->device == olddevice)
51000+ && (match->mode & GR_DELETED)) {
51001+ if (match->prev == NULL) {
51002+ role->subj_hash[index] = match->next;
51003+ if (match->next != NULL)
51004+ match->next->prev = NULL;
51005+ } else {
51006+ match->prev->next = match->next;
51007+ if (match->next != NULL)
51008+ match->next->prev = match->prev;
51009+ }
51010+ match->prev = NULL;
51011+ match->next = NULL;
51012+ match->inode = newinode;
51013+ match->device = newdevice;
51014+ match->mode &= ~GR_DELETED;
51015+
51016+ insert_acl_subj_label(match, role);
51017+ }
51018+
51019+ return;
51020+}
51021+
51022+static void
51023+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
51024+ const ino_t newinode, const dev_t newdevice)
51025+{
51026+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
51027+ struct inodev_entry *match;
51028+
51029+ match = inodev_set.i_hash[index];
51030+
51031+ while (match && (match->nentry->inode != oldinode ||
51032+ match->nentry->device != olddevice || !match->nentry->deleted))
51033+ match = match->next;
51034+
51035+ if (match && (match->nentry->inode == oldinode)
51036+ && (match->nentry->device == olddevice) &&
51037+ match->nentry->deleted) {
51038+ if (match->prev == NULL) {
51039+ inodev_set.i_hash[index] = match->next;
51040+ if (match->next != NULL)
51041+ match->next->prev = NULL;
51042+ } else {
51043+ match->prev->next = match->next;
51044+ if (match->next != NULL)
51045+ match->next->prev = match->prev;
51046+ }
51047+ match->prev = NULL;
51048+ match->next = NULL;
51049+ match->nentry->inode = newinode;
51050+ match->nentry->device = newdevice;
51051+ match->nentry->deleted = 0;
51052+
51053+ insert_inodev_entry(match);
51054+ }
51055+
51056+ return;
51057+}
51058+
51059+static void
51060+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
51061+{
51062+ struct acl_subject_label *subj;
51063+ struct acl_role_label *role;
51064+ unsigned int x;
51065+
51066+ FOR_EACH_ROLE_START(role)
51067+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
51068+
51069+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
51070+ if ((subj->inode == ino) && (subj->device == dev)) {
51071+ subj->inode = ino;
51072+ subj->device = dev;
51073+ }
51074+ FOR_EACH_NESTED_SUBJECT_END(subj)
51075+ FOR_EACH_SUBJECT_START(role, subj, x)
51076+ update_acl_obj_label(matchn->inode, matchn->device,
51077+ ino, dev, subj);
51078+ FOR_EACH_SUBJECT_END(subj,x)
51079+ FOR_EACH_ROLE_END(role)
51080+
51081+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
51082+
51083+ return;
51084+}
51085+
51086+static void
51087+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
51088+ const struct vfsmount *mnt)
51089+{
51090+ ino_t ino = dentry->d_inode->i_ino;
51091+ dev_t dev = __get_dev(dentry);
51092+
51093+ __do_handle_create(matchn, ino, dev);
51094+
51095+ return;
51096+}
51097+
51098+void
51099+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
51100+{
51101+ struct name_entry *matchn;
51102+
51103+ if (unlikely(!(gr_status & GR_READY)))
51104+ return;
51105+
51106+ preempt_disable();
51107+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
51108+
51109+ if (unlikely((unsigned long)matchn)) {
51110+ write_lock(&gr_inode_lock);
51111+ do_handle_create(matchn, dentry, mnt);
51112+ write_unlock(&gr_inode_lock);
51113+ }
51114+ preempt_enable();
51115+
51116+ return;
51117+}
51118+
51119+void
51120+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
51121+{
51122+ struct name_entry *matchn;
51123+
51124+ if (unlikely(!(gr_status & GR_READY)))
51125+ return;
51126+
51127+ preempt_disable();
51128+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
51129+
51130+ if (unlikely((unsigned long)matchn)) {
51131+ write_lock(&gr_inode_lock);
51132+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
51133+ write_unlock(&gr_inode_lock);
51134+ }
51135+ preempt_enable();
51136+
51137+ return;
51138+}
51139+
51140+void
51141+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
51142+ struct dentry *old_dentry,
51143+ struct dentry *new_dentry,
51144+ struct vfsmount *mnt, const __u8 replace)
51145+{
51146+ struct name_entry *matchn;
51147+ struct inodev_entry *inodev;
51148+ struct inode *inode = new_dentry->d_inode;
51149+ ino_t old_ino = old_dentry->d_inode->i_ino;
51150+ dev_t old_dev = __get_dev(old_dentry);
51151+
51152+ /* vfs_rename swaps the name and parent link for old_dentry and
51153+ new_dentry
51154+ at this point, old_dentry has the new name, parent link, and inode
51155+ for the renamed file
51156+ if a file is being replaced by a rename, new_dentry has the inode
51157+ and name for the replaced file
51158+ */
51159+
51160+ if (unlikely(!(gr_status & GR_READY)))
51161+ return;
51162+
51163+ preempt_disable();
51164+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
51165+
51166+ /* we wouldn't have to check d_inode if it weren't for
51167+ NFS silly-renaming
51168+ */
51169+
51170+ write_lock(&gr_inode_lock);
51171+ if (unlikely(replace && inode)) {
51172+ ino_t new_ino = inode->i_ino;
51173+ dev_t new_dev = __get_dev(new_dentry);
51174+
51175+ inodev = lookup_inodev_entry(new_ino, new_dev);
51176+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
51177+ do_handle_delete(inodev, new_ino, new_dev);
51178+ }
51179+
51180+ inodev = lookup_inodev_entry(old_ino, old_dev);
51181+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
51182+ do_handle_delete(inodev, old_ino, old_dev);
51183+
51184+ if (unlikely((unsigned long)matchn))
51185+ do_handle_create(matchn, old_dentry, mnt);
51186+
51187+ write_unlock(&gr_inode_lock);
51188+ preempt_enable();
51189+
51190+ return;
51191+}
51192+
51193+static int
51194+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
51195+ unsigned char **sum)
51196+{
51197+ struct acl_role_label *r;
51198+ struct role_allowed_ip *ipp;
51199+ struct role_transition *trans;
51200+ unsigned int i;
51201+ int found = 0;
51202+ u32 curr_ip = current->signal->curr_ip;
51203+
51204+ current->signal->saved_ip = curr_ip;
51205+
51206+ /* check transition table */
51207+
51208+ for (trans = current->role->transitions; trans; trans = trans->next) {
51209+ if (!strcmp(rolename, trans->rolename)) {
51210+ found = 1;
51211+ break;
51212+ }
51213+ }
51214+
51215+ if (!found)
51216+ return 0;
51217+
51218+ /* handle special roles that do not require authentication
51219+ and check ip */
51220+
51221+ FOR_EACH_ROLE_START(r)
51222+ if (!strcmp(rolename, r->rolename) &&
51223+ (r->roletype & GR_ROLE_SPECIAL)) {
51224+ found = 0;
51225+ if (r->allowed_ips != NULL) {
51226+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
51227+ if ((ntohl(curr_ip) & ipp->netmask) ==
51228+ (ntohl(ipp->addr) & ipp->netmask))
51229+ found = 1;
51230+ }
51231+ } else
51232+ found = 2;
51233+ if (!found)
51234+ return 0;
51235+
51236+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
51237+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
51238+ *salt = NULL;
51239+ *sum = NULL;
51240+ return 1;
51241+ }
51242+ }
51243+ FOR_EACH_ROLE_END(r)
51244+
51245+ for (i = 0; i < num_sprole_pws; i++) {
51246+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
51247+ *salt = acl_special_roles[i]->salt;
51248+ *sum = acl_special_roles[i]->sum;
51249+ return 1;
51250+ }
51251+ }
51252+
51253+ return 0;
51254+}
51255+
51256+static void
51257+assign_special_role(char *rolename)
51258+{
51259+ struct acl_object_label *obj;
51260+ struct acl_role_label *r;
51261+ struct acl_role_label *assigned = NULL;
51262+ struct task_struct *tsk;
51263+ struct file *filp;
51264+
51265+ FOR_EACH_ROLE_START(r)
51266+ if (!strcmp(rolename, r->rolename) &&
51267+ (r->roletype & GR_ROLE_SPECIAL)) {
51268+ assigned = r;
51269+ break;
51270+ }
51271+ FOR_EACH_ROLE_END(r)
51272+
51273+ if (!assigned)
51274+ return;
51275+
51276+ read_lock(&tasklist_lock);
51277+ read_lock(&grsec_exec_file_lock);
51278+
51279+ tsk = current->real_parent;
51280+ if (tsk == NULL)
51281+ goto out_unlock;
51282+
51283+ filp = tsk->exec_file;
51284+ if (filp == NULL)
51285+ goto out_unlock;
51286+
51287+ tsk->is_writable = 0;
51288+
51289+ tsk->acl_sp_role = 1;
51290+ tsk->acl_role_id = ++acl_sp_role_value;
51291+ tsk->role = assigned;
51292+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
51293+
51294+ /* ignore additional mmap checks for processes that are writable
51295+ by the default ACL */
51296+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51297+ if (unlikely(obj->mode & GR_WRITE))
51298+ tsk->is_writable = 1;
51299+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
51300+ if (unlikely(obj->mode & GR_WRITE))
51301+ tsk->is_writable = 1;
51302+
51303+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51304+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
51305+#endif
51306+
51307+out_unlock:
51308+ read_unlock(&grsec_exec_file_lock);
51309+ read_unlock(&tasklist_lock);
51310+ return;
51311+}
51312+
51313+int gr_check_secure_terminal(struct task_struct *task)
51314+{
51315+ struct task_struct *p, *p2, *p3;
51316+ struct files_struct *files;
51317+ struct fdtable *fdt;
51318+ struct file *our_file = NULL, *file;
51319+ int i;
51320+
51321+ if (task->signal->tty == NULL)
51322+ return 1;
51323+
51324+ files = get_files_struct(task);
51325+ if (files != NULL) {
51326+ rcu_read_lock();
51327+ fdt = files_fdtable(files);
51328+ for (i=0; i < fdt->max_fds; i++) {
51329+ file = fcheck_files(files, i);
51330+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
51331+ get_file(file);
51332+ our_file = file;
51333+ }
51334+ }
51335+ rcu_read_unlock();
51336+ put_files_struct(files);
51337+ }
51338+
51339+ if (our_file == NULL)
51340+ return 1;
51341+
51342+ read_lock(&tasklist_lock);
51343+ do_each_thread(p2, p) {
51344+ files = get_files_struct(p);
51345+ if (files == NULL ||
51346+ (p->signal && p->signal->tty == task->signal->tty)) {
51347+ if (files != NULL)
51348+ put_files_struct(files);
51349+ continue;
51350+ }
51351+ rcu_read_lock();
51352+ fdt = files_fdtable(files);
51353+ for (i=0; i < fdt->max_fds; i++) {
51354+ file = fcheck_files(files, i);
51355+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
51356+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
51357+ p3 = task;
51358+ while (p3->pid > 0) {
51359+ if (p3 == p)
51360+ break;
51361+ p3 = p3->real_parent;
51362+ }
51363+ if (p3 == p)
51364+ break;
51365+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
51366+ gr_handle_alertkill(p);
51367+ rcu_read_unlock();
51368+ put_files_struct(files);
51369+ read_unlock(&tasklist_lock);
51370+ fput(our_file);
51371+ return 0;
51372+ }
51373+ }
51374+ rcu_read_unlock();
51375+ put_files_struct(files);
51376+ } while_each_thread(p2, p);
51377+ read_unlock(&tasklist_lock);
51378+
51379+ fput(our_file);
51380+ return 1;
51381+}
51382+
51383+ssize_t
51384+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
51385+{
51386+ struct gr_arg_wrapper uwrap;
51387+ unsigned char *sprole_salt = NULL;
51388+ unsigned char *sprole_sum = NULL;
51389+ int error = sizeof (struct gr_arg_wrapper);
51390+ int error2 = 0;
51391+
51392+ mutex_lock(&gr_dev_mutex);
51393+
51394+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
51395+ error = -EPERM;
51396+ goto out;
51397+ }
51398+
51399+ if (count != sizeof (struct gr_arg_wrapper)) {
51400+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
51401+ error = -EINVAL;
51402+ goto out;
51403+ }
51404+
51405+
51406+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
51407+ gr_auth_expires = 0;
51408+ gr_auth_attempts = 0;
51409+ }
51410+
51411+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
51412+ error = -EFAULT;
51413+ goto out;
51414+ }
51415+
51416+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
51417+ error = -EINVAL;
51418+ goto out;
51419+ }
51420+
51421+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
51422+ error = -EFAULT;
51423+ goto out;
51424+ }
51425+
51426+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51427+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51428+ time_after(gr_auth_expires, get_seconds())) {
51429+ error = -EBUSY;
51430+ goto out;
51431+ }
51432+
51433+ /* if non-root trying to do anything other than use a special role,
51434+ do not attempt authentication, do not count towards authentication
51435+ locking
51436+ */
51437+
51438+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
51439+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
51440+ current_uid()) {
51441+ error = -EPERM;
51442+ goto out;
51443+ }
51444+
51445+ /* ensure pw and special role name are null terminated */
51446+
51447+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
51448+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
51449+
51450+ /* Okay.
51451+ * We have our enough of the argument structure..(we have yet
51452+ * to copy_from_user the tables themselves) . Copy the tables
51453+ * only if we need them, i.e. for loading operations. */
51454+
51455+ switch (gr_usermode->mode) {
51456+ case GR_STATUS:
51457+ if (gr_status & GR_READY) {
51458+ error = 1;
51459+ if (!gr_check_secure_terminal(current))
51460+ error = 3;
51461+ } else
51462+ error = 2;
51463+ goto out;
51464+ case GR_SHUTDOWN:
51465+ if ((gr_status & GR_READY)
51466+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51467+ pax_open_kernel();
51468+ gr_status &= ~GR_READY;
51469+ pax_close_kernel();
51470+
51471+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
51472+ free_variables();
51473+ memset(gr_usermode, 0, sizeof (struct gr_arg));
51474+ memset(gr_system_salt, 0, GR_SALT_LEN);
51475+ memset(gr_system_sum, 0, GR_SHA_LEN);
51476+ } else if (gr_status & GR_READY) {
51477+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
51478+ error = -EPERM;
51479+ } else {
51480+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
51481+ error = -EAGAIN;
51482+ }
51483+ break;
51484+ case GR_ENABLE:
51485+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
51486+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
51487+ else {
51488+ if (gr_status & GR_READY)
51489+ error = -EAGAIN;
51490+ else
51491+ error = error2;
51492+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
51493+ }
51494+ break;
51495+ case GR_RELOAD:
51496+ if (!(gr_status & GR_READY)) {
51497+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
51498+ error = -EAGAIN;
51499+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51500+ preempt_disable();
51501+
51502+ pax_open_kernel();
51503+ gr_status &= ~GR_READY;
51504+ pax_close_kernel();
51505+
51506+ free_variables();
51507+ if (!(error2 = gracl_init(gr_usermode))) {
51508+ preempt_enable();
51509+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
51510+ } else {
51511+ preempt_enable();
51512+ error = error2;
51513+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51514+ }
51515+ } else {
51516+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
51517+ error = -EPERM;
51518+ }
51519+ break;
51520+ case GR_SEGVMOD:
51521+ if (unlikely(!(gr_status & GR_READY))) {
51522+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
51523+ error = -EAGAIN;
51524+ break;
51525+ }
51526+
51527+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
51528+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
51529+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
51530+ struct acl_subject_label *segvacl;
51531+ segvacl =
51532+ lookup_acl_subj_label(gr_usermode->segv_inode,
51533+ gr_usermode->segv_device,
51534+ current->role);
51535+ if (segvacl) {
51536+ segvacl->crashes = 0;
51537+ segvacl->expires = 0;
51538+ }
51539+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
51540+ gr_remove_uid(gr_usermode->segv_uid);
51541+ }
51542+ } else {
51543+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
51544+ error = -EPERM;
51545+ }
51546+ break;
51547+ case GR_SPROLE:
51548+ case GR_SPROLEPAM:
51549+ if (unlikely(!(gr_status & GR_READY))) {
51550+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
51551+ error = -EAGAIN;
51552+ break;
51553+ }
51554+
51555+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
51556+ current->role->expires = 0;
51557+ current->role->auth_attempts = 0;
51558+ }
51559+
51560+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
51561+ time_after(current->role->expires, get_seconds())) {
51562+ error = -EBUSY;
51563+ goto out;
51564+ }
51565+
51566+ if (lookup_special_role_auth
51567+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
51568+ && ((!sprole_salt && !sprole_sum)
51569+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
51570+ char *p = "";
51571+ assign_special_role(gr_usermode->sp_role);
51572+ read_lock(&tasklist_lock);
51573+ if (current->real_parent)
51574+ p = current->real_parent->role->rolename;
51575+ read_unlock(&tasklist_lock);
51576+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
51577+ p, acl_sp_role_value);
51578+ } else {
51579+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
51580+ error = -EPERM;
51581+ if(!(current->role->auth_attempts++))
51582+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51583+
51584+ goto out;
51585+ }
51586+ break;
51587+ case GR_UNSPROLE:
51588+ if (unlikely(!(gr_status & GR_READY))) {
51589+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
51590+ error = -EAGAIN;
51591+ break;
51592+ }
51593+
51594+ if (current->role->roletype & GR_ROLE_SPECIAL) {
51595+ char *p = "";
51596+ int i = 0;
51597+
51598+ read_lock(&tasklist_lock);
51599+ if (current->real_parent) {
51600+ p = current->real_parent->role->rolename;
51601+ i = current->real_parent->acl_role_id;
51602+ }
51603+ read_unlock(&tasklist_lock);
51604+
51605+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
51606+ gr_set_acls(1);
51607+ } else {
51608+ error = -EPERM;
51609+ goto out;
51610+ }
51611+ break;
51612+ default:
51613+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
51614+ error = -EINVAL;
51615+ break;
51616+ }
51617+
51618+ if (error != -EPERM)
51619+ goto out;
51620+
51621+ if(!(gr_auth_attempts++))
51622+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
51623+
51624+ out:
51625+ mutex_unlock(&gr_dev_mutex);
51626+ return error;
51627+}
51628+
51629+/* must be called with
51630+ rcu_read_lock();
51631+ read_lock(&tasklist_lock);
51632+ read_lock(&grsec_exec_file_lock);
51633+*/
51634+int gr_apply_subject_to_task(struct task_struct *task)
51635+{
51636+ struct acl_object_label *obj;
51637+ char *tmpname;
51638+ struct acl_subject_label *tmpsubj;
51639+ struct file *filp;
51640+ struct name_entry *nmatch;
51641+
51642+ filp = task->exec_file;
51643+ if (filp == NULL)
51644+ return 0;
51645+
51646+ /* the following is to apply the correct subject
51647+ on binaries running when the RBAC system
51648+ is enabled, when the binaries have been
51649+ replaced or deleted since their execution
51650+ -----
51651+ when the RBAC system starts, the inode/dev
51652+ from exec_file will be one the RBAC system
51653+ is unaware of. It only knows the inode/dev
51654+ of the present file on disk, or the absence
51655+ of it.
51656+ */
51657+ preempt_disable();
51658+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
51659+
51660+ nmatch = lookup_name_entry(tmpname);
51661+ preempt_enable();
51662+ tmpsubj = NULL;
51663+ if (nmatch) {
51664+ if (nmatch->deleted)
51665+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
51666+ else
51667+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
51668+ if (tmpsubj != NULL)
51669+ task->acl = tmpsubj;
51670+ }
51671+ if (tmpsubj == NULL)
51672+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
51673+ task->role);
51674+ if (task->acl) {
51675+ task->is_writable = 0;
51676+ /* ignore additional mmap checks for processes that are writable
51677+ by the default ACL */
51678+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51679+ if (unlikely(obj->mode & GR_WRITE))
51680+ task->is_writable = 1;
51681+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
51682+ if (unlikely(obj->mode & GR_WRITE))
51683+ task->is_writable = 1;
51684+
51685+ gr_set_proc_res(task);
51686+
51687+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51688+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
51689+#endif
51690+ } else {
51691+ return 1;
51692+ }
51693+
51694+ return 0;
51695+}
51696+
51697+int
51698+gr_set_acls(const int type)
51699+{
51700+ struct task_struct *task, *task2;
51701+ struct acl_role_label *role = current->role;
51702+ __u16 acl_role_id = current->acl_role_id;
51703+ const struct cred *cred;
51704+ int ret;
51705+
51706+ rcu_read_lock();
51707+ read_lock(&tasklist_lock);
51708+ read_lock(&grsec_exec_file_lock);
51709+ do_each_thread(task2, task) {
51710+ /* check to see if we're called from the exit handler,
51711+ if so, only replace ACLs that have inherited the admin
51712+ ACL */
51713+
51714+ if (type && (task->role != role ||
51715+ task->acl_role_id != acl_role_id))
51716+ continue;
51717+
51718+ task->acl_role_id = 0;
51719+ task->acl_sp_role = 0;
51720+
51721+ if (task->exec_file) {
51722+ cred = __task_cred(task);
51723+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
51724+ ret = gr_apply_subject_to_task(task);
51725+ if (ret) {
51726+ read_unlock(&grsec_exec_file_lock);
51727+ read_unlock(&tasklist_lock);
51728+ rcu_read_unlock();
51729+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
51730+ return ret;
51731+ }
51732+ } else {
51733+ // it's a kernel process
51734+ task->role = kernel_role;
51735+ task->acl = kernel_role->root_label;
51736+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
51737+ task->acl->mode &= ~GR_PROCFIND;
51738+#endif
51739+ }
51740+ } while_each_thread(task2, task);
51741+ read_unlock(&grsec_exec_file_lock);
51742+ read_unlock(&tasklist_lock);
51743+ rcu_read_unlock();
51744+
51745+ return 0;
51746+}
51747+
51748+void
51749+gr_learn_resource(const struct task_struct *task,
51750+ const int res, const unsigned long wanted, const int gt)
51751+{
51752+ struct acl_subject_label *acl;
51753+ const struct cred *cred;
51754+
51755+ if (unlikely((gr_status & GR_READY) &&
51756+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
51757+ goto skip_reslog;
51758+
51759+#ifdef CONFIG_GRKERNSEC_RESLOG
51760+ gr_log_resource(task, res, wanted, gt);
51761+#endif
51762+ skip_reslog:
51763+
51764+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
51765+ return;
51766+
51767+ acl = task->acl;
51768+
51769+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
51770+ !(acl->resmask & (1 << (unsigned short) res))))
51771+ return;
51772+
51773+ if (wanted >= acl->res[res].rlim_cur) {
51774+ unsigned long res_add;
51775+
51776+ res_add = wanted;
51777+ switch (res) {
51778+ case RLIMIT_CPU:
51779+ res_add += GR_RLIM_CPU_BUMP;
51780+ break;
51781+ case RLIMIT_FSIZE:
51782+ res_add += GR_RLIM_FSIZE_BUMP;
51783+ break;
51784+ case RLIMIT_DATA:
51785+ res_add += GR_RLIM_DATA_BUMP;
51786+ break;
51787+ case RLIMIT_STACK:
51788+ res_add += GR_RLIM_STACK_BUMP;
51789+ break;
51790+ case RLIMIT_CORE:
51791+ res_add += GR_RLIM_CORE_BUMP;
51792+ break;
51793+ case RLIMIT_RSS:
51794+ res_add += GR_RLIM_RSS_BUMP;
51795+ break;
51796+ case RLIMIT_NPROC:
51797+ res_add += GR_RLIM_NPROC_BUMP;
51798+ break;
51799+ case RLIMIT_NOFILE:
51800+ res_add += GR_RLIM_NOFILE_BUMP;
51801+ break;
51802+ case RLIMIT_MEMLOCK:
51803+ res_add += GR_RLIM_MEMLOCK_BUMP;
51804+ break;
51805+ case RLIMIT_AS:
51806+ res_add += GR_RLIM_AS_BUMP;
51807+ break;
51808+ case RLIMIT_LOCKS:
51809+ res_add += GR_RLIM_LOCKS_BUMP;
51810+ break;
51811+ case RLIMIT_SIGPENDING:
51812+ res_add += GR_RLIM_SIGPENDING_BUMP;
51813+ break;
51814+ case RLIMIT_MSGQUEUE:
51815+ res_add += GR_RLIM_MSGQUEUE_BUMP;
51816+ break;
51817+ case RLIMIT_NICE:
51818+ res_add += GR_RLIM_NICE_BUMP;
51819+ break;
51820+ case RLIMIT_RTPRIO:
51821+ res_add += GR_RLIM_RTPRIO_BUMP;
51822+ break;
51823+ case RLIMIT_RTTIME:
51824+ res_add += GR_RLIM_RTTIME_BUMP;
51825+ break;
51826+ }
51827+
51828+ acl->res[res].rlim_cur = res_add;
51829+
51830+ if (wanted > acl->res[res].rlim_max)
51831+ acl->res[res].rlim_max = res_add;
51832+
51833+ /* only log the subject filename, since resource logging is supported for
51834+ single-subject learning only */
51835+ rcu_read_lock();
51836+ cred = __task_cred(task);
51837+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51838+ task->role->roletype, cred->uid, cred->gid, acl->filename,
51839+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
51840+ "", (unsigned long) res, &task->signal->saved_ip);
51841+ rcu_read_unlock();
51842+ }
51843+
51844+ return;
51845+}
51846+
51847+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
51848+void
51849+pax_set_initial_flags(struct linux_binprm *bprm)
51850+{
51851+ struct task_struct *task = current;
51852+ struct acl_subject_label *proc;
51853+ unsigned long flags;
51854+
51855+ if (unlikely(!(gr_status & GR_READY)))
51856+ return;
51857+
51858+ flags = pax_get_flags(task);
51859+
51860+ proc = task->acl;
51861+
51862+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
51863+ flags &= ~MF_PAX_PAGEEXEC;
51864+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
51865+ flags &= ~MF_PAX_SEGMEXEC;
51866+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
51867+ flags &= ~MF_PAX_RANDMMAP;
51868+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
51869+ flags &= ~MF_PAX_EMUTRAMP;
51870+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
51871+ flags &= ~MF_PAX_MPROTECT;
51872+
51873+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
51874+ flags |= MF_PAX_PAGEEXEC;
51875+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
51876+ flags |= MF_PAX_SEGMEXEC;
51877+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
51878+ flags |= MF_PAX_RANDMMAP;
51879+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
51880+ flags |= MF_PAX_EMUTRAMP;
51881+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
51882+ flags |= MF_PAX_MPROTECT;
51883+
51884+ pax_set_flags(task, flags);
51885+
51886+ return;
51887+}
51888+#endif
51889+
51890+#ifdef CONFIG_SYSCTL
51891+/* Eric Biederman likes breaking userland ABI and every inode-based security
51892+ system to save 35kb of memory */
51893+
51894+/* we modify the passed in filename, but adjust it back before returning */
51895+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
51896+{
51897+ struct name_entry *nmatch;
51898+ char *p, *lastp = NULL;
51899+ struct acl_object_label *obj = NULL, *tmp;
51900+ struct acl_subject_label *tmpsubj;
51901+ char c = '\0';
51902+
51903+ read_lock(&gr_inode_lock);
51904+
51905+ p = name + len - 1;
51906+ do {
51907+ nmatch = lookup_name_entry(name);
51908+ if (lastp != NULL)
51909+ *lastp = c;
51910+
51911+ if (nmatch == NULL)
51912+ goto next_component;
51913+ tmpsubj = current->acl;
51914+ do {
51915+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
51916+ if (obj != NULL) {
51917+ tmp = obj->globbed;
51918+ while (tmp) {
51919+ if (!glob_match(tmp->filename, name)) {
51920+ obj = tmp;
51921+ goto found_obj;
51922+ }
51923+ tmp = tmp->next;
51924+ }
51925+ goto found_obj;
51926+ }
51927+ } while ((tmpsubj = tmpsubj->parent_subject));
51928+next_component:
51929+ /* end case */
51930+ if (p == name)
51931+ break;
51932+
51933+ while (*p != '/')
51934+ p--;
51935+ if (p == name)
51936+ lastp = p + 1;
51937+ else {
51938+ lastp = p;
51939+ p--;
51940+ }
51941+ c = *lastp;
51942+ *lastp = '\0';
51943+ } while (1);
51944+found_obj:
51945+ read_unlock(&gr_inode_lock);
51946+ /* obj returned will always be non-null */
51947+ return obj;
51948+}
51949+
51950+/* returns 0 when allowing, non-zero on error
51951+ op of 0 is used for readdir, so we don't log the names of hidden files
51952+*/
51953+__u32
51954+gr_handle_sysctl(const struct ctl_table *table, const int op)
51955+{
51956+ struct ctl_table *tmp;
51957+ const char *proc_sys = "/proc/sys";
51958+ char *path;
51959+ struct acl_object_label *obj;
51960+ unsigned short len = 0, pos = 0, depth = 0, i;
51961+ __u32 err = 0;
51962+ __u32 mode = 0;
51963+
51964+ if (unlikely(!(gr_status & GR_READY)))
51965+ return 0;
51966+
51967+ /* for now, ignore operations on non-sysctl entries if it's not a
51968+ readdir*/
51969+ if (table->child != NULL && op != 0)
51970+ return 0;
51971+
51972+ mode |= GR_FIND;
51973+ /* it's only a read if it's an entry, read on dirs is for readdir */
51974+ if (op & MAY_READ)
51975+ mode |= GR_READ;
51976+ if (op & MAY_WRITE)
51977+ mode |= GR_WRITE;
51978+
51979+ preempt_disable();
51980+
51981+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51982+
51983+ /* it's only a read/write if it's an actual entry, not a dir
51984+ (which are opened for readdir)
51985+ */
51986+
51987+ /* convert the requested sysctl entry into a pathname */
51988+
51989+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51990+ len += strlen(tmp->procname);
51991+ len++;
51992+ depth++;
51993+ }
51994+
51995+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51996+ /* deny */
51997+ goto out;
51998+ }
51999+
52000+ memset(path, 0, PAGE_SIZE);
52001+
52002+ memcpy(path, proc_sys, strlen(proc_sys));
52003+
52004+ pos += strlen(proc_sys);
52005+
52006+ for (; depth > 0; depth--) {
52007+ path[pos] = '/';
52008+ pos++;
52009+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
52010+ if (depth == i) {
52011+ memcpy(path + pos, tmp->procname,
52012+ strlen(tmp->procname));
52013+ pos += strlen(tmp->procname);
52014+ }
52015+ i++;
52016+ }
52017+ }
52018+
52019+ obj = gr_lookup_by_name(path, pos);
52020+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
52021+
52022+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
52023+ ((err & mode) != mode))) {
52024+ __u32 new_mode = mode;
52025+
52026+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52027+
52028+ err = 0;
52029+ gr_log_learn_sysctl(path, new_mode);
52030+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
52031+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
52032+ err = -ENOENT;
52033+ } else if (!(err & GR_FIND)) {
52034+ err = -ENOENT;
52035+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
52036+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
52037+ path, (mode & GR_READ) ? " reading" : "",
52038+ (mode & GR_WRITE) ? " writing" : "");
52039+ err = -EACCES;
52040+ } else if ((err & mode) != mode) {
52041+ err = -EACCES;
52042+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
52043+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
52044+ path, (mode & GR_READ) ? " reading" : "",
52045+ (mode & GR_WRITE) ? " writing" : "");
52046+ err = 0;
52047+ } else
52048+ err = 0;
52049+
52050+ out:
52051+ preempt_enable();
52052+
52053+ return err;
52054+}
52055+#endif
52056+
52057+int
52058+gr_handle_proc_ptrace(struct task_struct *task)
52059+{
52060+ struct file *filp;
52061+ struct task_struct *tmp = task;
52062+ struct task_struct *curtemp = current;
52063+ __u32 retmode;
52064+
52065+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52066+ if (unlikely(!(gr_status & GR_READY)))
52067+ return 0;
52068+#endif
52069+
52070+ read_lock(&tasklist_lock);
52071+ read_lock(&grsec_exec_file_lock);
52072+ filp = task->exec_file;
52073+
52074+ while (tmp->pid > 0) {
52075+ if (tmp == curtemp)
52076+ break;
52077+ tmp = tmp->real_parent;
52078+ }
52079+
52080+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52081+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
52082+ read_unlock(&grsec_exec_file_lock);
52083+ read_unlock(&tasklist_lock);
52084+ return 1;
52085+ }
52086+
52087+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52088+ if (!(gr_status & GR_READY)) {
52089+ read_unlock(&grsec_exec_file_lock);
52090+ read_unlock(&tasklist_lock);
52091+ return 0;
52092+ }
52093+#endif
52094+
52095+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
52096+ read_unlock(&grsec_exec_file_lock);
52097+ read_unlock(&tasklist_lock);
52098+
52099+ if (retmode & GR_NOPTRACE)
52100+ return 1;
52101+
52102+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
52103+ && (current->acl != task->acl || (current->acl != current->role->root_label
52104+ && current->pid != task->pid)))
52105+ return 1;
52106+
52107+ return 0;
52108+}
52109+
52110+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
52111+{
52112+ if (unlikely(!(gr_status & GR_READY)))
52113+ return;
52114+
52115+ if (!(current->role->roletype & GR_ROLE_GOD))
52116+ return;
52117+
52118+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
52119+ p->role->rolename, gr_task_roletype_to_char(p),
52120+ p->acl->filename);
52121+}
52122+
52123+int
52124+gr_handle_ptrace(struct task_struct *task, const long request)
52125+{
52126+ struct task_struct *tmp = task;
52127+ struct task_struct *curtemp = current;
52128+ __u32 retmode;
52129+
52130+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
52131+ if (unlikely(!(gr_status & GR_READY)))
52132+ return 0;
52133+#endif
52134+
52135+ read_lock(&tasklist_lock);
52136+ while (tmp->pid > 0) {
52137+ if (tmp == curtemp)
52138+ break;
52139+ tmp = tmp->real_parent;
52140+ }
52141+
52142+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
52143+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
52144+ read_unlock(&tasklist_lock);
52145+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52146+ return 1;
52147+ }
52148+ read_unlock(&tasklist_lock);
52149+
52150+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52151+ if (!(gr_status & GR_READY))
52152+ return 0;
52153+#endif
52154+
52155+ read_lock(&grsec_exec_file_lock);
52156+ if (unlikely(!task->exec_file)) {
52157+ read_unlock(&grsec_exec_file_lock);
52158+ return 0;
52159+ }
52160+
52161+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
52162+ read_unlock(&grsec_exec_file_lock);
52163+
52164+ if (retmode & GR_NOPTRACE) {
52165+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52166+ return 1;
52167+ }
52168+
52169+ if (retmode & GR_PTRACERD) {
52170+ switch (request) {
52171+ case PTRACE_SEIZE:
52172+ case PTRACE_POKETEXT:
52173+ case PTRACE_POKEDATA:
52174+ case PTRACE_POKEUSR:
52175+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
52176+ case PTRACE_SETREGS:
52177+ case PTRACE_SETFPREGS:
52178+#endif
52179+#ifdef CONFIG_X86
52180+ case PTRACE_SETFPXREGS:
52181+#endif
52182+#ifdef CONFIG_ALTIVEC
52183+ case PTRACE_SETVRREGS:
52184+#endif
52185+ return 1;
52186+ default:
52187+ return 0;
52188+ }
52189+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
52190+ !(current->role->roletype & GR_ROLE_GOD) &&
52191+ (current->acl != task->acl)) {
52192+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
52193+ return 1;
52194+ }
52195+
52196+ return 0;
52197+}
52198+
52199+static int is_writable_mmap(const struct file *filp)
52200+{
52201+ struct task_struct *task = current;
52202+ struct acl_object_label *obj, *obj2;
52203+
52204+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
52205+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
52206+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52207+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
52208+ task->role->root_label);
52209+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
52210+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
52211+ return 1;
52212+ }
52213+ }
52214+ return 0;
52215+}
52216+
52217+int
52218+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
52219+{
52220+ __u32 mode;
52221+
52222+ if (unlikely(!file || !(prot & PROT_EXEC)))
52223+ return 1;
52224+
52225+ if (is_writable_mmap(file))
52226+ return 0;
52227+
52228+ mode =
52229+ gr_search_file(file->f_path.dentry,
52230+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52231+ file->f_path.mnt);
52232+
52233+ if (!gr_tpe_allow(file))
52234+ return 0;
52235+
52236+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52237+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52238+ return 0;
52239+ } else if (unlikely(!(mode & GR_EXEC))) {
52240+ return 0;
52241+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52242+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52243+ return 1;
52244+ }
52245+
52246+ return 1;
52247+}
52248+
52249+int
52250+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
52251+{
52252+ __u32 mode;
52253+
52254+ if (unlikely(!file || !(prot & PROT_EXEC)))
52255+ return 1;
52256+
52257+ if (is_writable_mmap(file))
52258+ return 0;
52259+
52260+ mode =
52261+ gr_search_file(file->f_path.dentry,
52262+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
52263+ file->f_path.mnt);
52264+
52265+ if (!gr_tpe_allow(file))
52266+ return 0;
52267+
52268+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
52269+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52270+ return 0;
52271+ } else if (unlikely(!(mode & GR_EXEC))) {
52272+ return 0;
52273+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
52274+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
52275+ return 1;
52276+ }
52277+
52278+ return 1;
52279+}
52280+
52281+void
52282+gr_acl_handle_psacct(struct task_struct *task, const long code)
52283+{
52284+ unsigned long runtime;
52285+ unsigned long cputime;
52286+ unsigned int wday, cday;
52287+ __u8 whr, chr;
52288+ __u8 wmin, cmin;
52289+ __u8 wsec, csec;
52290+ struct timespec timeval;
52291+
52292+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
52293+ !(task->acl->mode & GR_PROCACCT)))
52294+ return;
52295+
52296+ do_posix_clock_monotonic_gettime(&timeval);
52297+ runtime = timeval.tv_sec - task->start_time.tv_sec;
52298+ wday = runtime / (3600 * 24);
52299+ runtime -= wday * (3600 * 24);
52300+ whr = runtime / 3600;
52301+ runtime -= whr * 3600;
52302+ wmin = runtime / 60;
52303+ runtime -= wmin * 60;
52304+ wsec = runtime;
52305+
52306+ cputime = (task->utime + task->stime) / HZ;
52307+ cday = cputime / (3600 * 24);
52308+ cputime -= cday * (3600 * 24);
52309+ chr = cputime / 3600;
52310+ cputime -= chr * 3600;
52311+ cmin = cputime / 60;
52312+ cputime -= cmin * 60;
52313+ csec = cputime;
52314+
52315+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
52316+
52317+ return;
52318+}
52319+
52320+void gr_set_kernel_label(struct task_struct *task)
52321+{
52322+ if (gr_status & GR_READY) {
52323+ task->role = kernel_role;
52324+ task->acl = kernel_role->root_label;
52325+ }
52326+ return;
52327+}
52328+
52329+#ifdef CONFIG_TASKSTATS
52330+int gr_is_taskstats_denied(int pid)
52331+{
52332+ struct task_struct *task;
52333+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52334+ const struct cred *cred;
52335+#endif
52336+ int ret = 0;
52337+
52338+ /* restrict taskstats viewing to un-chrooted root users
52339+ who have the 'view' subject flag if the RBAC system is enabled
52340+ */
52341+
52342+ rcu_read_lock();
52343+ read_lock(&tasklist_lock);
52344+ task = find_task_by_vpid(pid);
52345+ if (task) {
52346+#ifdef CONFIG_GRKERNSEC_CHROOT
52347+ if (proc_is_chrooted(task))
52348+ ret = -EACCES;
52349+#endif
52350+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52351+ cred = __task_cred(task);
52352+#ifdef CONFIG_GRKERNSEC_PROC_USER
52353+ if (cred->uid != 0)
52354+ ret = -EACCES;
52355+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52356+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
52357+ ret = -EACCES;
52358+#endif
52359+#endif
52360+ if (gr_status & GR_READY) {
52361+ if (!(task->acl->mode & GR_VIEW))
52362+ ret = -EACCES;
52363+ }
52364+ } else
52365+ ret = -ENOENT;
52366+
52367+ read_unlock(&tasklist_lock);
52368+ rcu_read_unlock();
52369+
52370+ return ret;
52371+}
52372+#endif
52373+
52374+/* AUXV entries are filled via a descendant of search_binary_handler
52375+ after we've already applied the subject for the target
52376+*/
52377+int gr_acl_enable_at_secure(void)
52378+{
52379+ if (unlikely(!(gr_status & GR_READY)))
52380+ return 0;
52381+
52382+ if (current->acl->mode & GR_ATSECURE)
52383+ return 1;
52384+
52385+ return 0;
52386+}
52387+
52388+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
52389+{
52390+ struct task_struct *task = current;
52391+ struct dentry *dentry = file->f_path.dentry;
52392+ struct vfsmount *mnt = file->f_path.mnt;
52393+ struct acl_object_label *obj, *tmp;
52394+ struct acl_subject_label *subj;
52395+ unsigned int bufsize;
52396+ int is_not_root;
52397+ char *path;
52398+ dev_t dev = __get_dev(dentry);
52399+
52400+ if (unlikely(!(gr_status & GR_READY)))
52401+ return 1;
52402+
52403+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52404+ return 1;
52405+
52406+ /* ignore Eric Biederman */
52407+ if (IS_PRIVATE(dentry->d_inode))
52408+ return 1;
52409+
52410+ subj = task->acl;
52411+ do {
52412+ obj = lookup_acl_obj_label(ino, dev, subj);
52413+ if (obj != NULL)
52414+ return (obj->mode & GR_FIND) ? 1 : 0;
52415+ } while ((subj = subj->parent_subject));
52416+
52417+ /* this is purely an optimization since we're looking for an object
52418+ for the directory we're doing a readdir on
52419+ if it's possible for any globbed object to match the entry we're
52420+ filling into the directory, then the object we find here will be
52421+ an anchor point with attached globbed objects
52422+ */
52423+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
52424+ if (obj->globbed == NULL)
52425+ return (obj->mode & GR_FIND) ? 1 : 0;
52426+
52427+ is_not_root = ((obj->filename[0] == '/') &&
52428+ (obj->filename[1] == '\0')) ? 0 : 1;
52429+ bufsize = PAGE_SIZE - namelen - is_not_root;
52430+
52431+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
52432+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
52433+ return 1;
52434+
52435+ preempt_disable();
52436+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
52437+ bufsize);
52438+
52439+ bufsize = strlen(path);
52440+
52441+ /* if base is "/", don't append an additional slash */
52442+ if (is_not_root)
52443+ *(path + bufsize) = '/';
52444+ memcpy(path + bufsize + is_not_root, name, namelen);
52445+ *(path + bufsize + namelen + is_not_root) = '\0';
52446+
52447+ tmp = obj->globbed;
52448+ while (tmp) {
52449+ if (!glob_match(tmp->filename, path)) {
52450+ preempt_enable();
52451+ return (tmp->mode & GR_FIND) ? 1 : 0;
52452+ }
52453+ tmp = tmp->next;
52454+ }
52455+ preempt_enable();
52456+ return (obj->mode & GR_FIND) ? 1 : 0;
52457+}
52458+
52459+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
52460+EXPORT_SYMBOL(gr_acl_is_enabled);
52461+#endif
52462+EXPORT_SYMBOL(gr_learn_resource);
52463+EXPORT_SYMBOL(gr_set_kernel_label);
52464+#ifdef CONFIG_SECURITY
52465+EXPORT_SYMBOL(gr_check_user_change);
52466+EXPORT_SYMBOL(gr_check_group_change);
52467+#endif
52468+
52469diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
52470new file mode 100644
52471index 0000000..34fefda
52472--- /dev/null
52473+++ b/grsecurity/gracl_alloc.c
52474@@ -0,0 +1,105 @@
52475+#include <linux/kernel.h>
52476+#include <linux/mm.h>
52477+#include <linux/slab.h>
52478+#include <linux/vmalloc.h>
52479+#include <linux/gracl.h>
52480+#include <linux/grsecurity.h>
52481+
52482+static unsigned long alloc_stack_next = 1;
52483+static unsigned long alloc_stack_size = 1;
52484+static void **alloc_stack;
52485+
52486+static __inline__ int
52487+alloc_pop(void)
52488+{
52489+ if (alloc_stack_next == 1)
52490+ return 0;
52491+
52492+ kfree(alloc_stack[alloc_stack_next - 2]);
52493+
52494+ alloc_stack_next--;
52495+
52496+ return 1;
52497+}
52498+
52499+static __inline__ int
52500+alloc_push(void *buf)
52501+{
52502+ if (alloc_stack_next >= alloc_stack_size)
52503+ return 1;
52504+
52505+ alloc_stack[alloc_stack_next - 1] = buf;
52506+
52507+ alloc_stack_next++;
52508+
52509+ return 0;
52510+}
52511+
52512+void *
52513+acl_alloc(unsigned long len)
52514+{
52515+ void *ret = NULL;
52516+
52517+ if (!len || len > PAGE_SIZE)
52518+ goto out;
52519+
52520+ ret = kmalloc(len, GFP_KERNEL);
52521+
52522+ if (ret) {
52523+ if (alloc_push(ret)) {
52524+ kfree(ret);
52525+ ret = NULL;
52526+ }
52527+ }
52528+
52529+out:
52530+ return ret;
52531+}
52532+
52533+void *
52534+acl_alloc_num(unsigned long num, unsigned long len)
52535+{
52536+ if (!len || (num > (PAGE_SIZE / len)))
52537+ return NULL;
52538+
52539+ return acl_alloc(num * len);
52540+}
52541+
52542+void
52543+acl_free_all(void)
52544+{
52545+ if (gr_acl_is_enabled() || !alloc_stack)
52546+ return;
52547+
52548+ while (alloc_pop()) ;
52549+
52550+ if (alloc_stack) {
52551+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
52552+ kfree(alloc_stack);
52553+ else
52554+ vfree(alloc_stack);
52555+ }
52556+
52557+ alloc_stack = NULL;
52558+ alloc_stack_size = 1;
52559+ alloc_stack_next = 1;
52560+
52561+ return;
52562+}
52563+
52564+int
52565+acl_alloc_stack_init(unsigned long size)
52566+{
52567+ if ((size * sizeof (void *)) <= PAGE_SIZE)
52568+ alloc_stack =
52569+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
52570+ else
52571+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
52572+
52573+ alloc_stack_size = size;
52574+
52575+ if (!alloc_stack)
52576+ return 0;
52577+ else
52578+ return 1;
52579+}
52580diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
52581new file mode 100644
52582index 0000000..955ddfb
52583--- /dev/null
52584+++ b/grsecurity/gracl_cap.c
52585@@ -0,0 +1,101 @@
52586+#include <linux/kernel.h>
52587+#include <linux/module.h>
52588+#include <linux/sched.h>
52589+#include <linux/gracl.h>
52590+#include <linux/grsecurity.h>
52591+#include <linux/grinternal.h>
52592+
52593+extern const char *captab_log[];
52594+extern int captab_log_entries;
52595+
52596+int
52597+gr_acl_is_capable(const int cap)
52598+{
52599+ struct task_struct *task = current;
52600+ const struct cred *cred = current_cred();
52601+ struct acl_subject_label *curracl;
52602+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52603+ kernel_cap_t cap_audit = __cap_empty_set;
52604+
52605+ if (!gr_acl_is_enabled())
52606+ return 1;
52607+
52608+ curracl = task->acl;
52609+
52610+ cap_drop = curracl->cap_lower;
52611+ cap_mask = curracl->cap_mask;
52612+ cap_audit = curracl->cap_invert_audit;
52613+
52614+ while ((curracl = curracl->parent_subject)) {
52615+ /* if the cap isn't specified in the current computed mask but is specified in the
52616+ current level subject, and is lowered in the current level subject, then add
52617+ it to the set of dropped capabilities
52618+ otherwise, add the current level subject's mask to the current computed mask
52619+ */
52620+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52621+ cap_raise(cap_mask, cap);
52622+ if (cap_raised(curracl->cap_lower, cap))
52623+ cap_raise(cap_drop, cap);
52624+ if (cap_raised(curracl->cap_invert_audit, cap))
52625+ cap_raise(cap_audit, cap);
52626+ }
52627+ }
52628+
52629+ if (!cap_raised(cap_drop, cap)) {
52630+ if (cap_raised(cap_audit, cap))
52631+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
52632+ return 1;
52633+ }
52634+
52635+ curracl = task->acl;
52636+
52637+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
52638+ && cap_raised(cred->cap_effective, cap)) {
52639+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
52640+ task->role->roletype, cred->uid,
52641+ cred->gid, task->exec_file ?
52642+ gr_to_filename(task->exec_file->f_path.dentry,
52643+ task->exec_file->f_path.mnt) : curracl->filename,
52644+ curracl->filename, 0UL,
52645+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
52646+ return 1;
52647+ }
52648+
52649+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
52650+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
52651+ return 0;
52652+}
52653+
52654+int
52655+gr_acl_is_capable_nolog(const int cap)
52656+{
52657+ struct acl_subject_label *curracl;
52658+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
52659+
52660+ if (!gr_acl_is_enabled())
52661+ return 1;
52662+
52663+ curracl = current->acl;
52664+
52665+ cap_drop = curracl->cap_lower;
52666+ cap_mask = curracl->cap_mask;
52667+
52668+ while ((curracl = curracl->parent_subject)) {
52669+ /* if the cap isn't specified in the current computed mask but is specified in the
52670+ current level subject, and is lowered in the current level subject, then add
52671+ it to the set of dropped capabilities
52672+ otherwise, add the current level subject's mask to the current computed mask
52673+ */
52674+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
52675+ cap_raise(cap_mask, cap);
52676+ if (cap_raised(curracl->cap_lower, cap))
52677+ cap_raise(cap_drop, cap);
52678+ }
52679+ }
52680+
52681+ if (!cap_raised(cap_drop, cap))
52682+ return 1;
52683+
52684+ return 0;
52685+}
52686+
52687diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
52688new file mode 100644
52689index 0000000..4eda5c3
52690--- /dev/null
52691+++ b/grsecurity/gracl_fs.c
52692@@ -0,0 +1,433 @@
52693+#include <linux/kernel.h>
52694+#include <linux/sched.h>
52695+#include <linux/types.h>
52696+#include <linux/fs.h>
52697+#include <linux/file.h>
52698+#include <linux/stat.h>
52699+#include <linux/grsecurity.h>
52700+#include <linux/grinternal.h>
52701+#include <linux/gracl.h>
52702+
52703+__u32
52704+gr_acl_handle_hidden_file(const struct dentry * dentry,
52705+ const struct vfsmount * mnt)
52706+{
52707+ __u32 mode;
52708+
52709+ if (unlikely(!dentry->d_inode))
52710+ return GR_FIND;
52711+
52712+ mode =
52713+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
52714+
52715+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
52716+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52717+ return mode;
52718+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
52719+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
52720+ return 0;
52721+ } else if (unlikely(!(mode & GR_FIND)))
52722+ return 0;
52723+
52724+ return GR_FIND;
52725+}
52726+
52727+__u32
52728+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
52729+ int acc_mode)
52730+{
52731+ __u32 reqmode = GR_FIND;
52732+ __u32 mode;
52733+
52734+ if (unlikely(!dentry->d_inode))
52735+ return reqmode;
52736+
52737+ if (acc_mode & MAY_APPEND)
52738+ reqmode |= GR_APPEND;
52739+ else if (acc_mode & MAY_WRITE)
52740+ reqmode |= GR_WRITE;
52741+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
52742+ reqmode |= GR_READ;
52743+
52744+ mode =
52745+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52746+ mnt);
52747+
52748+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52749+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52750+ reqmode & GR_READ ? " reading" : "",
52751+ reqmode & GR_WRITE ? " writing" : reqmode &
52752+ GR_APPEND ? " appending" : "");
52753+ return reqmode;
52754+ } else
52755+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52756+ {
52757+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
52758+ reqmode & GR_READ ? " reading" : "",
52759+ reqmode & GR_WRITE ? " writing" : reqmode &
52760+ GR_APPEND ? " appending" : "");
52761+ return 0;
52762+ } else if (unlikely((mode & reqmode) != reqmode))
52763+ return 0;
52764+
52765+ return reqmode;
52766+}
52767+
52768+__u32
52769+gr_acl_handle_creat(const struct dentry * dentry,
52770+ const struct dentry * p_dentry,
52771+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
52772+ const int imode)
52773+{
52774+ __u32 reqmode = GR_WRITE | GR_CREATE;
52775+ __u32 mode;
52776+
52777+ if (acc_mode & MAY_APPEND)
52778+ reqmode |= GR_APPEND;
52779+ // if a directory was required or the directory already exists, then
52780+ // don't count this open as a read
52781+ if ((acc_mode & MAY_READ) &&
52782+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
52783+ reqmode |= GR_READ;
52784+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
52785+ reqmode |= GR_SETID;
52786+
52787+ mode =
52788+ gr_check_create(dentry, p_dentry, p_mnt,
52789+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
52790+
52791+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52792+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52793+ reqmode & GR_READ ? " reading" : "",
52794+ reqmode & GR_WRITE ? " writing" : reqmode &
52795+ GR_APPEND ? " appending" : "");
52796+ return reqmode;
52797+ } else
52798+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52799+ {
52800+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
52801+ reqmode & GR_READ ? " reading" : "",
52802+ reqmode & GR_WRITE ? " writing" : reqmode &
52803+ GR_APPEND ? " appending" : "");
52804+ return 0;
52805+ } else if (unlikely((mode & reqmode) != reqmode))
52806+ return 0;
52807+
52808+ return reqmode;
52809+}
52810+
52811+__u32
52812+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
52813+ const int fmode)
52814+{
52815+ __u32 mode, reqmode = GR_FIND;
52816+
52817+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
52818+ reqmode |= GR_EXEC;
52819+ if (fmode & S_IWOTH)
52820+ reqmode |= GR_WRITE;
52821+ if (fmode & S_IROTH)
52822+ reqmode |= GR_READ;
52823+
52824+ mode =
52825+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
52826+ mnt);
52827+
52828+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
52829+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52830+ reqmode & GR_READ ? " reading" : "",
52831+ reqmode & GR_WRITE ? " writing" : "",
52832+ reqmode & GR_EXEC ? " executing" : "");
52833+ return reqmode;
52834+ } else
52835+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
52836+ {
52837+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
52838+ reqmode & GR_READ ? " reading" : "",
52839+ reqmode & GR_WRITE ? " writing" : "",
52840+ reqmode & GR_EXEC ? " executing" : "");
52841+ return 0;
52842+ } else if (unlikely((mode & reqmode) != reqmode))
52843+ return 0;
52844+
52845+ return reqmode;
52846+}
52847+
52848+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
52849+{
52850+ __u32 mode;
52851+
52852+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
52853+
52854+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
52855+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
52856+ return mode;
52857+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
52858+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
52859+ return 0;
52860+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
52861+ return 0;
52862+
52863+ return (reqmode);
52864+}
52865+
52866+__u32
52867+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
52868+{
52869+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
52870+}
52871+
52872+__u32
52873+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
52874+{
52875+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
52876+}
52877+
52878+__u32
52879+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
52880+{
52881+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
52882+}
52883+
52884+__u32
52885+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
52886+{
52887+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
52888+}
52889+
52890+__u32
52891+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
52892+ mode_t mode)
52893+{
52894+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
52895+ return 1;
52896+
52897+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52898+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52899+ GR_FCHMOD_ACL_MSG);
52900+ } else {
52901+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
52902+ }
52903+}
52904+
52905+__u32
52906+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
52907+ mode_t mode)
52908+{
52909+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
52910+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
52911+ GR_CHMOD_ACL_MSG);
52912+ } else {
52913+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
52914+ }
52915+}
52916+
52917+__u32
52918+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
52919+{
52920+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
52921+}
52922+
52923+__u32
52924+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
52925+{
52926+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
52927+}
52928+
52929+__u32
52930+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
52931+{
52932+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
52933+}
52934+
52935+__u32
52936+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
52937+{
52938+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
52939+ GR_UNIXCONNECT_ACL_MSG);
52940+}
52941+
52942+/* hardlinks require at minimum create and link permission,
52943+ any additional privilege required is based on the
52944+ privilege of the file being linked to
52945+*/
52946+__u32
52947+gr_acl_handle_link(const struct dentry * new_dentry,
52948+ const struct dentry * parent_dentry,
52949+ const struct vfsmount * parent_mnt,
52950+ const struct dentry * old_dentry,
52951+ const struct vfsmount * old_mnt, const char *to)
52952+{
52953+ __u32 mode;
52954+ __u32 needmode = GR_CREATE | GR_LINK;
52955+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
52956+
52957+ mode =
52958+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
52959+ old_mnt);
52960+
52961+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
52962+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52963+ return mode;
52964+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52965+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
52966+ return 0;
52967+ } else if (unlikely((mode & needmode) != needmode))
52968+ return 0;
52969+
52970+ return 1;
52971+}
52972+
52973+__u32
52974+gr_acl_handle_symlink(const struct dentry * new_dentry,
52975+ const struct dentry * parent_dentry,
52976+ const struct vfsmount * parent_mnt, const char *from)
52977+{
52978+ __u32 needmode = GR_WRITE | GR_CREATE;
52979+ __u32 mode;
52980+
52981+ mode =
52982+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
52983+ GR_CREATE | GR_AUDIT_CREATE |
52984+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
52985+
52986+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
52987+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52988+ return mode;
52989+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
52990+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
52991+ return 0;
52992+ } else if (unlikely((mode & needmode) != needmode))
52993+ return 0;
52994+
52995+ return (GR_WRITE | GR_CREATE);
52996+}
52997+
52998+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
52999+{
53000+ __u32 mode;
53001+
53002+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
53003+
53004+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
53005+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
53006+ return mode;
53007+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
53008+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
53009+ return 0;
53010+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
53011+ return 0;
53012+
53013+ return (reqmode);
53014+}
53015+
53016+__u32
53017+gr_acl_handle_mknod(const struct dentry * new_dentry,
53018+ const struct dentry * parent_dentry,
53019+ const struct vfsmount * parent_mnt,
53020+ const int mode)
53021+{
53022+ __u32 reqmode = GR_WRITE | GR_CREATE;
53023+ if (unlikely(mode & (S_ISUID | S_ISGID)))
53024+ reqmode |= GR_SETID;
53025+
53026+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53027+ reqmode, GR_MKNOD_ACL_MSG);
53028+}
53029+
53030+__u32
53031+gr_acl_handle_mkdir(const struct dentry *new_dentry,
53032+ const struct dentry *parent_dentry,
53033+ const struct vfsmount *parent_mnt)
53034+{
53035+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
53036+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
53037+}
53038+
53039+#define RENAME_CHECK_SUCCESS(old, new) \
53040+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
53041+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
53042+
53043+int
53044+gr_acl_handle_rename(struct dentry *new_dentry,
53045+ struct dentry *parent_dentry,
53046+ const struct vfsmount *parent_mnt,
53047+ struct dentry *old_dentry,
53048+ struct inode *old_parent_inode,
53049+ struct vfsmount *old_mnt, const char *newname)
53050+{
53051+ __u32 comp1, comp2;
53052+ int error = 0;
53053+
53054+ if (unlikely(!gr_acl_is_enabled()))
53055+ return 0;
53056+
53057+ if (!new_dentry->d_inode) {
53058+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
53059+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
53060+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
53061+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
53062+ GR_DELETE | GR_AUDIT_DELETE |
53063+ GR_AUDIT_READ | GR_AUDIT_WRITE |
53064+ GR_SUPPRESS, old_mnt);
53065+ } else {
53066+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
53067+ GR_CREATE | GR_DELETE |
53068+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
53069+ GR_AUDIT_READ | GR_AUDIT_WRITE |
53070+ GR_SUPPRESS, parent_mnt);
53071+ comp2 =
53072+ gr_search_file(old_dentry,
53073+ GR_READ | GR_WRITE | GR_AUDIT_READ |
53074+ GR_DELETE | GR_AUDIT_DELETE |
53075+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
53076+ }
53077+
53078+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
53079+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
53080+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53081+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
53082+ && !(comp2 & GR_SUPPRESS)) {
53083+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
53084+ error = -EACCES;
53085+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
53086+ error = -EACCES;
53087+
53088+ return error;
53089+}
53090+
53091+void
53092+gr_acl_handle_exit(void)
53093+{
53094+ u16 id;
53095+ char *rolename;
53096+ struct file *exec_file;
53097+
53098+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
53099+ !(current->role->roletype & GR_ROLE_PERSIST))) {
53100+ id = current->acl_role_id;
53101+ rolename = current->role->rolename;
53102+ gr_set_acls(1);
53103+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
53104+ }
53105+
53106+ write_lock(&grsec_exec_file_lock);
53107+ exec_file = current->exec_file;
53108+ current->exec_file = NULL;
53109+ write_unlock(&grsec_exec_file_lock);
53110+
53111+ if (exec_file)
53112+ fput(exec_file);
53113+}
53114+
53115+int
53116+gr_acl_handle_procpidmem(const struct task_struct *task)
53117+{
53118+ if (unlikely(!gr_acl_is_enabled()))
53119+ return 0;
53120+
53121+ if (task != current && task->acl->mode & GR_PROTPROCFD)
53122+ return -EACCES;
53123+
53124+ return 0;
53125+}
53126diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
53127new file mode 100644
53128index 0000000..17050ca
53129--- /dev/null
53130+++ b/grsecurity/gracl_ip.c
53131@@ -0,0 +1,381 @@
53132+#include <linux/kernel.h>
53133+#include <asm/uaccess.h>
53134+#include <asm/errno.h>
53135+#include <net/sock.h>
53136+#include <linux/file.h>
53137+#include <linux/fs.h>
53138+#include <linux/net.h>
53139+#include <linux/in.h>
53140+#include <linux/skbuff.h>
53141+#include <linux/ip.h>
53142+#include <linux/udp.h>
53143+#include <linux/types.h>
53144+#include <linux/sched.h>
53145+#include <linux/netdevice.h>
53146+#include <linux/inetdevice.h>
53147+#include <linux/gracl.h>
53148+#include <linux/grsecurity.h>
53149+#include <linux/grinternal.h>
53150+
53151+#define GR_BIND 0x01
53152+#define GR_CONNECT 0x02
53153+#define GR_INVERT 0x04
53154+#define GR_BINDOVERRIDE 0x08
53155+#define GR_CONNECTOVERRIDE 0x10
53156+#define GR_SOCK_FAMILY 0x20
53157+
53158+static const char * gr_protocols[IPPROTO_MAX] = {
53159+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
53160+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
53161+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
53162+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
53163+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
53164+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
53165+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
53166+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
53167+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
53168+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
53169+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
53170+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
53171+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
53172+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
53173+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
53174+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
53175+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
53176+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
53177+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
53178+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
53179+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
53180+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
53181+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
53182+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
53183+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
53184+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
53185+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
53186+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
53187+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
53188+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
53189+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
53190+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
53191+ };
53192+
53193+static const char * gr_socktypes[SOCK_MAX] = {
53194+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
53195+ "unknown:7", "unknown:8", "unknown:9", "packet"
53196+ };
53197+
53198+static const char * gr_sockfamilies[AF_MAX+1] = {
53199+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
53200+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
53201+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
53202+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
53203+ };
53204+
53205+const char *
53206+gr_proto_to_name(unsigned char proto)
53207+{
53208+ return gr_protocols[proto];
53209+}
53210+
53211+const char *
53212+gr_socktype_to_name(unsigned char type)
53213+{
53214+ return gr_socktypes[type];
53215+}
53216+
53217+const char *
53218+gr_sockfamily_to_name(unsigned char family)
53219+{
53220+ return gr_sockfamilies[family];
53221+}
53222+
53223+int
53224+gr_search_socket(const int domain, const int type, const int protocol)
53225+{
53226+ struct acl_subject_label *curr;
53227+ const struct cred *cred = current_cred();
53228+
53229+ if (unlikely(!gr_acl_is_enabled()))
53230+ goto exit;
53231+
53232+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
53233+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
53234+ goto exit; // let the kernel handle it
53235+
53236+ curr = current->acl;
53237+
53238+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
53239+ /* the family is allowed, if this is PF_INET allow it only if
53240+ the extra sock type/protocol checks pass */
53241+ if (domain == PF_INET)
53242+ goto inet_check;
53243+ goto exit;
53244+ } else {
53245+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53246+ __u32 fakeip = 0;
53247+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53248+ current->role->roletype, cred->uid,
53249+ cred->gid, current->exec_file ?
53250+ gr_to_filename(current->exec_file->f_path.dentry,
53251+ current->exec_file->f_path.mnt) :
53252+ curr->filename, curr->filename,
53253+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
53254+ &current->signal->saved_ip);
53255+ goto exit;
53256+ }
53257+ goto exit_fail;
53258+ }
53259+
53260+inet_check:
53261+ /* the rest of this checking is for IPv4 only */
53262+ if (!curr->ips)
53263+ goto exit;
53264+
53265+ if ((curr->ip_type & (1 << type)) &&
53266+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
53267+ goto exit;
53268+
53269+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53270+ /* we don't place acls on raw sockets , and sometimes
53271+ dgram/ip sockets are opened for ioctl and not
53272+ bind/connect, so we'll fake a bind learn log */
53273+ if (type == SOCK_RAW || type == SOCK_PACKET) {
53274+ __u32 fakeip = 0;
53275+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53276+ current->role->roletype, cred->uid,
53277+ cred->gid, current->exec_file ?
53278+ gr_to_filename(current->exec_file->f_path.dentry,
53279+ current->exec_file->f_path.mnt) :
53280+ curr->filename, curr->filename,
53281+ &fakeip, 0, type,
53282+ protocol, GR_CONNECT, &current->signal->saved_ip);
53283+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
53284+ __u32 fakeip = 0;
53285+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53286+ current->role->roletype, cred->uid,
53287+ cred->gid, current->exec_file ?
53288+ gr_to_filename(current->exec_file->f_path.dentry,
53289+ current->exec_file->f_path.mnt) :
53290+ curr->filename, curr->filename,
53291+ &fakeip, 0, type,
53292+ protocol, GR_BIND, &current->signal->saved_ip);
53293+ }
53294+ /* we'll log when they use connect or bind */
53295+ goto exit;
53296+ }
53297+
53298+exit_fail:
53299+ if (domain == PF_INET)
53300+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
53301+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
53302+ else
53303+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
53304+ gr_socktype_to_name(type), protocol);
53305+
53306+ return 0;
53307+exit:
53308+ return 1;
53309+}
53310+
53311+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
53312+{
53313+ if ((ip->mode & mode) &&
53314+ (ip_port >= ip->low) &&
53315+ (ip_port <= ip->high) &&
53316+ ((ntohl(ip_addr) & our_netmask) ==
53317+ (ntohl(our_addr) & our_netmask))
53318+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
53319+ && (ip->type & (1 << type))) {
53320+ if (ip->mode & GR_INVERT)
53321+ return 2; // specifically denied
53322+ else
53323+ return 1; // allowed
53324+ }
53325+
53326+ return 0; // not specifically allowed, may continue parsing
53327+}
53328+
53329+static int
53330+gr_search_connectbind(const int full_mode, struct sock *sk,
53331+ struct sockaddr_in *addr, const int type)
53332+{
53333+ char iface[IFNAMSIZ] = {0};
53334+ struct acl_subject_label *curr;
53335+ struct acl_ip_label *ip;
53336+ struct inet_sock *isk;
53337+ struct net_device *dev;
53338+ struct in_device *idev;
53339+ unsigned long i;
53340+ int ret;
53341+ int mode = full_mode & (GR_BIND | GR_CONNECT);
53342+ __u32 ip_addr = 0;
53343+ __u32 our_addr;
53344+ __u32 our_netmask;
53345+ char *p;
53346+ __u16 ip_port = 0;
53347+ const struct cred *cred = current_cred();
53348+
53349+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
53350+ return 0;
53351+
53352+ curr = current->acl;
53353+ isk = inet_sk(sk);
53354+
53355+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
53356+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
53357+ addr->sin_addr.s_addr = curr->inaddr_any_override;
53358+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
53359+ struct sockaddr_in saddr;
53360+ int err;
53361+
53362+ saddr.sin_family = AF_INET;
53363+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
53364+ saddr.sin_port = isk->inet_sport;
53365+
53366+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53367+ if (err)
53368+ return err;
53369+
53370+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
53371+ if (err)
53372+ return err;
53373+ }
53374+
53375+ if (!curr->ips)
53376+ return 0;
53377+
53378+ ip_addr = addr->sin_addr.s_addr;
53379+ ip_port = ntohs(addr->sin_port);
53380+
53381+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
53382+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
53383+ current->role->roletype, cred->uid,
53384+ cred->gid, current->exec_file ?
53385+ gr_to_filename(current->exec_file->f_path.dentry,
53386+ current->exec_file->f_path.mnt) :
53387+ curr->filename, curr->filename,
53388+ &ip_addr, ip_port, type,
53389+ sk->sk_protocol, mode, &current->signal->saved_ip);
53390+ return 0;
53391+ }
53392+
53393+ for (i = 0; i < curr->ip_num; i++) {
53394+ ip = *(curr->ips + i);
53395+ if (ip->iface != NULL) {
53396+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
53397+ p = strchr(iface, ':');
53398+ if (p != NULL)
53399+ *p = '\0';
53400+ dev = dev_get_by_name(sock_net(sk), iface);
53401+ if (dev == NULL)
53402+ continue;
53403+ idev = in_dev_get(dev);
53404+ if (idev == NULL) {
53405+ dev_put(dev);
53406+ continue;
53407+ }
53408+ rcu_read_lock();
53409+ for_ifa(idev) {
53410+ if (!strcmp(ip->iface, ifa->ifa_label)) {
53411+ our_addr = ifa->ifa_address;
53412+ our_netmask = 0xffffffff;
53413+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53414+ if (ret == 1) {
53415+ rcu_read_unlock();
53416+ in_dev_put(idev);
53417+ dev_put(dev);
53418+ return 0;
53419+ } else if (ret == 2) {
53420+ rcu_read_unlock();
53421+ in_dev_put(idev);
53422+ dev_put(dev);
53423+ goto denied;
53424+ }
53425+ }
53426+ } endfor_ifa(idev);
53427+ rcu_read_unlock();
53428+ in_dev_put(idev);
53429+ dev_put(dev);
53430+ } else {
53431+ our_addr = ip->addr;
53432+ our_netmask = ip->netmask;
53433+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
53434+ if (ret == 1)
53435+ return 0;
53436+ else if (ret == 2)
53437+ goto denied;
53438+ }
53439+ }
53440+
53441+denied:
53442+ if (mode == GR_BIND)
53443+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53444+ else if (mode == GR_CONNECT)
53445+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
53446+
53447+ return -EACCES;
53448+}
53449+
53450+int
53451+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
53452+{
53453+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
53454+}
53455+
53456+int
53457+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
53458+{
53459+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
53460+}
53461+
53462+int gr_search_listen(struct socket *sock)
53463+{
53464+ struct sock *sk = sock->sk;
53465+ struct sockaddr_in addr;
53466+
53467+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53468+ addr.sin_port = inet_sk(sk)->inet_sport;
53469+
53470+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53471+}
53472+
53473+int gr_search_accept(struct socket *sock)
53474+{
53475+ struct sock *sk = sock->sk;
53476+ struct sockaddr_in addr;
53477+
53478+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
53479+ addr.sin_port = inet_sk(sk)->inet_sport;
53480+
53481+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
53482+}
53483+
53484+int
53485+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
53486+{
53487+ if (addr)
53488+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
53489+ else {
53490+ struct sockaddr_in sin;
53491+ const struct inet_sock *inet = inet_sk(sk);
53492+
53493+ sin.sin_addr.s_addr = inet->inet_daddr;
53494+ sin.sin_port = inet->inet_dport;
53495+
53496+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53497+ }
53498+}
53499+
53500+int
53501+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
53502+{
53503+ struct sockaddr_in sin;
53504+
53505+ if (unlikely(skb->len < sizeof (struct udphdr)))
53506+ return 0; // skip this packet
53507+
53508+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
53509+ sin.sin_port = udp_hdr(skb)->source;
53510+
53511+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
53512+}
53513diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
53514new file mode 100644
53515index 0000000..25f54ef
53516--- /dev/null
53517+++ b/grsecurity/gracl_learn.c
53518@@ -0,0 +1,207 @@
53519+#include <linux/kernel.h>
53520+#include <linux/mm.h>
53521+#include <linux/sched.h>
53522+#include <linux/poll.h>
53523+#include <linux/string.h>
53524+#include <linux/file.h>
53525+#include <linux/types.h>
53526+#include <linux/vmalloc.h>
53527+#include <linux/grinternal.h>
53528+
53529+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
53530+ size_t count, loff_t *ppos);
53531+extern int gr_acl_is_enabled(void);
53532+
53533+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
53534+static int gr_learn_attached;
53535+
53536+/* use a 512k buffer */
53537+#define LEARN_BUFFER_SIZE (512 * 1024)
53538+
53539+static DEFINE_SPINLOCK(gr_learn_lock);
53540+static DEFINE_MUTEX(gr_learn_user_mutex);
53541+
53542+/* we need to maintain two buffers, so that the kernel context of grlearn
53543+ uses a semaphore around the userspace copying, and the other kernel contexts
53544+ use a spinlock when copying into the buffer, since they cannot sleep
53545+*/
53546+static char *learn_buffer;
53547+static char *learn_buffer_user;
53548+static int learn_buffer_len;
53549+static int learn_buffer_user_len;
53550+
53551+static ssize_t
53552+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
53553+{
53554+ DECLARE_WAITQUEUE(wait, current);
53555+ ssize_t retval = 0;
53556+
53557+ add_wait_queue(&learn_wait, &wait);
53558+ set_current_state(TASK_INTERRUPTIBLE);
53559+ do {
53560+ mutex_lock(&gr_learn_user_mutex);
53561+ spin_lock(&gr_learn_lock);
53562+ if (learn_buffer_len)
53563+ break;
53564+ spin_unlock(&gr_learn_lock);
53565+ mutex_unlock(&gr_learn_user_mutex);
53566+ if (file->f_flags & O_NONBLOCK) {
53567+ retval = -EAGAIN;
53568+ goto out;
53569+ }
53570+ if (signal_pending(current)) {
53571+ retval = -ERESTARTSYS;
53572+ goto out;
53573+ }
53574+
53575+ schedule();
53576+ } while (1);
53577+
53578+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
53579+ learn_buffer_user_len = learn_buffer_len;
53580+ retval = learn_buffer_len;
53581+ learn_buffer_len = 0;
53582+
53583+ spin_unlock(&gr_learn_lock);
53584+
53585+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
53586+ retval = -EFAULT;
53587+
53588+ mutex_unlock(&gr_learn_user_mutex);
53589+out:
53590+ set_current_state(TASK_RUNNING);
53591+ remove_wait_queue(&learn_wait, &wait);
53592+ return retval;
53593+}
53594+
53595+static unsigned int
53596+poll_learn(struct file * file, poll_table * wait)
53597+{
53598+ poll_wait(file, &learn_wait, wait);
53599+
53600+ if (learn_buffer_len)
53601+ return (POLLIN | POLLRDNORM);
53602+
53603+ return 0;
53604+}
53605+
53606+void
53607+gr_clear_learn_entries(void)
53608+{
53609+ char *tmp;
53610+
53611+ mutex_lock(&gr_learn_user_mutex);
53612+ spin_lock(&gr_learn_lock);
53613+ tmp = learn_buffer;
53614+ learn_buffer = NULL;
53615+ spin_unlock(&gr_learn_lock);
53616+ if (tmp)
53617+ vfree(tmp);
53618+ if (learn_buffer_user != NULL) {
53619+ vfree(learn_buffer_user);
53620+ learn_buffer_user = NULL;
53621+ }
53622+ learn_buffer_len = 0;
53623+ mutex_unlock(&gr_learn_user_mutex);
53624+
53625+ return;
53626+}
53627+
53628+void
53629+gr_add_learn_entry(const char *fmt, ...)
53630+{
53631+ va_list args;
53632+ unsigned int len;
53633+
53634+ if (!gr_learn_attached)
53635+ return;
53636+
53637+ spin_lock(&gr_learn_lock);
53638+
53639+ /* leave a gap at the end so we know when it's "full" but don't have to
53640+ compute the exact length of the string we're trying to append
53641+ */
53642+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
53643+ spin_unlock(&gr_learn_lock);
53644+ wake_up_interruptible(&learn_wait);
53645+ return;
53646+ }
53647+ if (learn_buffer == NULL) {
53648+ spin_unlock(&gr_learn_lock);
53649+ return;
53650+ }
53651+
53652+ va_start(args, fmt);
53653+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
53654+ va_end(args);
53655+
53656+ learn_buffer_len += len + 1;
53657+
53658+ spin_unlock(&gr_learn_lock);
53659+ wake_up_interruptible(&learn_wait);
53660+
53661+ return;
53662+}
53663+
53664+static int
53665+open_learn(struct inode *inode, struct file *file)
53666+{
53667+ if (file->f_mode & FMODE_READ && gr_learn_attached)
53668+ return -EBUSY;
53669+ if (file->f_mode & FMODE_READ) {
53670+ int retval = 0;
53671+ mutex_lock(&gr_learn_user_mutex);
53672+ if (learn_buffer == NULL)
53673+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
53674+ if (learn_buffer_user == NULL)
53675+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
53676+ if (learn_buffer == NULL) {
53677+ retval = -ENOMEM;
53678+ goto out_error;
53679+ }
53680+ if (learn_buffer_user == NULL) {
53681+ retval = -ENOMEM;
53682+ goto out_error;
53683+ }
53684+ learn_buffer_len = 0;
53685+ learn_buffer_user_len = 0;
53686+ gr_learn_attached = 1;
53687+out_error:
53688+ mutex_unlock(&gr_learn_user_mutex);
53689+ return retval;
53690+ }
53691+ return 0;
53692+}
53693+
53694+static int
53695+close_learn(struct inode *inode, struct file *file)
53696+{
53697+ if (file->f_mode & FMODE_READ) {
53698+ char *tmp = NULL;
53699+ mutex_lock(&gr_learn_user_mutex);
53700+ spin_lock(&gr_learn_lock);
53701+ tmp = learn_buffer;
53702+ learn_buffer = NULL;
53703+ spin_unlock(&gr_learn_lock);
53704+ if (tmp)
53705+ vfree(tmp);
53706+ if (learn_buffer_user != NULL) {
53707+ vfree(learn_buffer_user);
53708+ learn_buffer_user = NULL;
53709+ }
53710+ learn_buffer_len = 0;
53711+ learn_buffer_user_len = 0;
53712+ gr_learn_attached = 0;
53713+ mutex_unlock(&gr_learn_user_mutex);
53714+ }
53715+
53716+ return 0;
53717+}
53718+
53719+const struct file_operations grsec_fops = {
53720+ .read = read_learn,
53721+ .write = write_grsec_handler,
53722+ .open = open_learn,
53723+ .release = close_learn,
53724+ .poll = poll_learn,
53725+};
53726diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
53727new file mode 100644
53728index 0000000..39645c9
53729--- /dev/null
53730+++ b/grsecurity/gracl_res.c
53731@@ -0,0 +1,68 @@
53732+#include <linux/kernel.h>
53733+#include <linux/sched.h>
53734+#include <linux/gracl.h>
53735+#include <linux/grinternal.h>
53736+
53737+static const char *restab_log[] = {
53738+ [RLIMIT_CPU] = "RLIMIT_CPU",
53739+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
53740+ [RLIMIT_DATA] = "RLIMIT_DATA",
53741+ [RLIMIT_STACK] = "RLIMIT_STACK",
53742+ [RLIMIT_CORE] = "RLIMIT_CORE",
53743+ [RLIMIT_RSS] = "RLIMIT_RSS",
53744+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
53745+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
53746+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
53747+ [RLIMIT_AS] = "RLIMIT_AS",
53748+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
53749+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
53750+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
53751+ [RLIMIT_NICE] = "RLIMIT_NICE",
53752+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
53753+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
53754+ [GR_CRASH_RES] = "RLIMIT_CRASH"
53755+};
53756+
53757+void
53758+gr_log_resource(const struct task_struct *task,
53759+ const int res, const unsigned long wanted, const int gt)
53760+{
53761+ const struct cred *cred;
53762+ unsigned long rlim;
53763+
53764+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
53765+ return;
53766+
53767+ // not yet supported resource
53768+ if (unlikely(!restab_log[res]))
53769+ return;
53770+
53771+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
53772+ rlim = task_rlimit_max(task, res);
53773+ else
53774+ rlim = task_rlimit(task, res);
53775+
53776+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
53777+ return;
53778+
53779+ rcu_read_lock();
53780+ cred = __task_cred(task);
53781+
53782+ if (res == RLIMIT_NPROC &&
53783+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
53784+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
53785+ goto out_rcu_unlock;
53786+ else if (res == RLIMIT_MEMLOCK &&
53787+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
53788+ goto out_rcu_unlock;
53789+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
53790+ goto out_rcu_unlock;
53791+ rcu_read_unlock();
53792+
53793+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
53794+
53795+ return;
53796+out_rcu_unlock:
53797+ rcu_read_unlock();
53798+ return;
53799+}
53800diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
53801new file mode 100644
53802index 0000000..5556be3
53803--- /dev/null
53804+++ b/grsecurity/gracl_segv.c
53805@@ -0,0 +1,299 @@
53806+#include <linux/kernel.h>
53807+#include <linux/mm.h>
53808+#include <asm/uaccess.h>
53809+#include <asm/errno.h>
53810+#include <asm/mman.h>
53811+#include <net/sock.h>
53812+#include <linux/file.h>
53813+#include <linux/fs.h>
53814+#include <linux/net.h>
53815+#include <linux/in.h>
53816+#include <linux/slab.h>
53817+#include <linux/types.h>
53818+#include <linux/sched.h>
53819+#include <linux/timer.h>
53820+#include <linux/gracl.h>
53821+#include <linux/grsecurity.h>
53822+#include <linux/grinternal.h>
53823+
53824+static struct crash_uid *uid_set;
53825+static unsigned short uid_used;
53826+static DEFINE_SPINLOCK(gr_uid_lock);
53827+extern rwlock_t gr_inode_lock;
53828+extern struct acl_subject_label *
53829+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
53830+ struct acl_role_label *role);
53831+
53832+#ifdef CONFIG_BTRFS_FS
53833+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
53834+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
53835+#endif
53836+
53837+static inline dev_t __get_dev(const struct dentry *dentry)
53838+{
53839+#ifdef CONFIG_BTRFS_FS
53840+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
53841+ return get_btrfs_dev_from_inode(dentry->d_inode);
53842+ else
53843+#endif
53844+ return dentry->d_inode->i_sb->s_dev;
53845+}
53846+
53847+int
53848+gr_init_uidset(void)
53849+{
53850+ uid_set =
53851+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
53852+ uid_used = 0;
53853+
53854+ return uid_set ? 1 : 0;
53855+}
53856+
53857+void
53858+gr_free_uidset(void)
53859+{
53860+ if (uid_set)
53861+ kfree(uid_set);
53862+
53863+ return;
53864+}
53865+
53866+int
53867+gr_find_uid(const uid_t uid)
53868+{
53869+ struct crash_uid *tmp = uid_set;
53870+ uid_t buid;
53871+ int low = 0, high = uid_used - 1, mid;
53872+
53873+ while (high >= low) {
53874+ mid = (low + high) >> 1;
53875+ buid = tmp[mid].uid;
53876+ if (buid == uid)
53877+ return mid;
53878+ if (buid > uid)
53879+ high = mid - 1;
53880+ if (buid < uid)
53881+ low = mid + 1;
53882+ }
53883+
53884+ return -1;
53885+}
53886+
53887+static __inline__ void
53888+gr_insertsort(void)
53889+{
53890+ unsigned short i, j;
53891+ struct crash_uid index;
53892+
53893+ for (i = 1; i < uid_used; i++) {
53894+ index = uid_set[i];
53895+ j = i;
53896+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
53897+ uid_set[j] = uid_set[j - 1];
53898+ j--;
53899+ }
53900+ uid_set[j] = index;
53901+ }
53902+
53903+ return;
53904+}
53905+
53906+static __inline__ void
53907+gr_insert_uid(const uid_t uid, const unsigned long expires)
53908+{
53909+ int loc;
53910+
53911+ if (uid_used == GR_UIDTABLE_MAX)
53912+ return;
53913+
53914+ loc = gr_find_uid(uid);
53915+
53916+ if (loc >= 0) {
53917+ uid_set[loc].expires = expires;
53918+ return;
53919+ }
53920+
53921+ uid_set[uid_used].uid = uid;
53922+ uid_set[uid_used].expires = expires;
53923+ uid_used++;
53924+
53925+ gr_insertsort();
53926+
53927+ return;
53928+}
53929+
53930+void
53931+gr_remove_uid(const unsigned short loc)
53932+{
53933+ unsigned short i;
53934+
53935+ for (i = loc + 1; i < uid_used; i++)
53936+ uid_set[i - 1] = uid_set[i];
53937+
53938+ uid_used--;
53939+
53940+ return;
53941+}
53942+
53943+int
53944+gr_check_crash_uid(const uid_t uid)
53945+{
53946+ int loc;
53947+ int ret = 0;
53948+
53949+ if (unlikely(!gr_acl_is_enabled()))
53950+ return 0;
53951+
53952+ spin_lock(&gr_uid_lock);
53953+ loc = gr_find_uid(uid);
53954+
53955+ if (loc < 0)
53956+ goto out_unlock;
53957+
53958+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
53959+ gr_remove_uid(loc);
53960+ else
53961+ ret = 1;
53962+
53963+out_unlock:
53964+ spin_unlock(&gr_uid_lock);
53965+ return ret;
53966+}
53967+
53968+static __inline__ int
53969+proc_is_setxid(const struct cred *cred)
53970+{
53971+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
53972+ cred->uid != cred->fsuid)
53973+ return 1;
53974+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
53975+ cred->gid != cred->fsgid)
53976+ return 1;
53977+
53978+ return 0;
53979+}
53980+
53981+extern int gr_fake_force_sig(int sig, struct task_struct *t);
53982+
53983+void
53984+gr_handle_crash(struct task_struct *task, const int sig)
53985+{
53986+ struct acl_subject_label *curr;
53987+ struct task_struct *tsk, *tsk2;
53988+ const struct cred *cred;
53989+ const struct cred *cred2;
53990+
53991+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
53992+ return;
53993+
53994+ if (unlikely(!gr_acl_is_enabled()))
53995+ return;
53996+
53997+ curr = task->acl;
53998+
53999+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
54000+ return;
54001+
54002+ if (time_before_eq(curr->expires, get_seconds())) {
54003+ curr->expires = 0;
54004+ curr->crashes = 0;
54005+ }
54006+
54007+ curr->crashes++;
54008+
54009+ if (!curr->expires)
54010+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
54011+
54012+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54013+ time_after(curr->expires, get_seconds())) {
54014+ rcu_read_lock();
54015+ cred = __task_cred(task);
54016+ if (cred->uid && proc_is_setxid(cred)) {
54017+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54018+ spin_lock(&gr_uid_lock);
54019+ gr_insert_uid(cred->uid, curr->expires);
54020+ spin_unlock(&gr_uid_lock);
54021+ curr->expires = 0;
54022+ curr->crashes = 0;
54023+ read_lock(&tasklist_lock);
54024+ do_each_thread(tsk2, tsk) {
54025+ cred2 = __task_cred(tsk);
54026+ if (tsk != task && cred2->uid == cred->uid)
54027+ gr_fake_force_sig(SIGKILL, tsk);
54028+ } while_each_thread(tsk2, tsk);
54029+ read_unlock(&tasklist_lock);
54030+ } else {
54031+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
54032+ read_lock(&tasklist_lock);
54033+ read_lock(&grsec_exec_file_lock);
54034+ do_each_thread(tsk2, tsk) {
54035+ if (likely(tsk != task)) {
54036+ // if this thread has the same subject as the one that triggered
54037+ // RES_CRASH and it's the same binary, kill it
54038+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
54039+ gr_fake_force_sig(SIGKILL, tsk);
54040+ }
54041+ } while_each_thread(tsk2, tsk);
54042+ read_unlock(&grsec_exec_file_lock);
54043+ read_unlock(&tasklist_lock);
54044+ }
54045+ rcu_read_unlock();
54046+ }
54047+
54048+ return;
54049+}
54050+
54051+int
54052+gr_check_crash_exec(const struct file *filp)
54053+{
54054+ struct acl_subject_label *curr;
54055+
54056+ if (unlikely(!gr_acl_is_enabled()))
54057+ return 0;
54058+
54059+ read_lock(&gr_inode_lock);
54060+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
54061+ __get_dev(filp->f_path.dentry),
54062+ current->role);
54063+ read_unlock(&gr_inode_lock);
54064+
54065+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
54066+ (!curr->crashes && !curr->expires))
54067+ return 0;
54068+
54069+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
54070+ time_after(curr->expires, get_seconds()))
54071+ return 1;
54072+ else if (time_before_eq(curr->expires, get_seconds())) {
54073+ curr->crashes = 0;
54074+ curr->expires = 0;
54075+ }
54076+
54077+ return 0;
54078+}
54079+
54080+void
54081+gr_handle_alertkill(struct task_struct *task)
54082+{
54083+ struct acl_subject_label *curracl;
54084+ __u32 curr_ip;
54085+ struct task_struct *p, *p2;
54086+
54087+ if (unlikely(!gr_acl_is_enabled()))
54088+ return;
54089+
54090+ curracl = task->acl;
54091+ curr_ip = task->signal->curr_ip;
54092+
54093+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
54094+ read_lock(&tasklist_lock);
54095+ do_each_thread(p2, p) {
54096+ if (p->signal->curr_ip == curr_ip)
54097+ gr_fake_force_sig(SIGKILL, p);
54098+ } while_each_thread(p2, p);
54099+ read_unlock(&tasklist_lock);
54100+ } else if (curracl->mode & GR_KILLPROC)
54101+ gr_fake_force_sig(SIGKILL, task);
54102+
54103+ return;
54104+}
54105diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
54106new file mode 100644
54107index 0000000..9d83a69
54108--- /dev/null
54109+++ b/grsecurity/gracl_shm.c
54110@@ -0,0 +1,40 @@
54111+#include <linux/kernel.h>
54112+#include <linux/mm.h>
54113+#include <linux/sched.h>
54114+#include <linux/file.h>
54115+#include <linux/ipc.h>
54116+#include <linux/gracl.h>
54117+#include <linux/grsecurity.h>
54118+#include <linux/grinternal.h>
54119+
54120+int
54121+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54122+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54123+{
54124+ struct task_struct *task;
54125+
54126+ if (!gr_acl_is_enabled())
54127+ return 1;
54128+
54129+ rcu_read_lock();
54130+ read_lock(&tasklist_lock);
54131+
54132+ task = find_task_by_vpid(shm_cprid);
54133+
54134+ if (unlikely(!task))
54135+ task = find_task_by_vpid(shm_lapid);
54136+
54137+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
54138+ (task->pid == shm_lapid)) &&
54139+ (task->acl->mode & GR_PROTSHM) &&
54140+ (task->acl != current->acl))) {
54141+ read_unlock(&tasklist_lock);
54142+ rcu_read_unlock();
54143+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
54144+ return 0;
54145+ }
54146+ read_unlock(&tasklist_lock);
54147+ rcu_read_unlock();
54148+
54149+ return 1;
54150+}
54151diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
54152new file mode 100644
54153index 0000000..bc0be01
54154--- /dev/null
54155+++ b/grsecurity/grsec_chdir.c
54156@@ -0,0 +1,19 @@
54157+#include <linux/kernel.h>
54158+#include <linux/sched.h>
54159+#include <linux/fs.h>
54160+#include <linux/file.h>
54161+#include <linux/grsecurity.h>
54162+#include <linux/grinternal.h>
54163+
54164+void
54165+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
54166+{
54167+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54168+ if ((grsec_enable_chdir && grsec_enable_group &&
54169+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
54170+ !grsec_enable_group)) {
54171+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
54172+ }
54173+#endif
54174+ return;
54175+}
54176diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
54177new file mode 100644
54178index 0000000..a2dc675
54179--- /dev/null
54180+++ b/grsecurity/grsec_chroot.c
54181@@ -0,0 +1,351 @@
54182+#include <linux/kernel.h>
54183+#include <linux/module.h>
54184+#include <linux/sched.h>
54185+#include <linux/file.h>
54186+#include <linux/fs.h>
54187+#include <linux/mount.h>
54188+#include <linux/types.h>
54189+#include <linux/pid_namespace.h>
54190+#include <linux/grsecurity.h>
54191+#include <linux/grinternal.h>
54192+
54193+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
54194+{
54195+#ifdef CONFIG_GRKERNSEC
54196+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
54197+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
54198+ task->gr_is_chrooted = 1;
54199+ else
54200+ task->gr_is_chrooted = 0;
54201+
54202+ task->gr_chroot_dentry = path->dentry;
54203+#endif
54204+ return;
54205+}
54206+
54207+void gr_clear_chroot_entries(struct task_struct *task)
54208+{
54209+#ifdef CONFIG_GRKERNSEC
54210+ task->gr_is_chrooted = 0;
54211+ task->gr_chroot_dentry = NULL;
54212+#endif
54213+ return;
54214+}
54215+
54216+int
54217+gr_handle_chroot_unix(const pid_t pid)
54218+{
54219+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54220+ struct task_struct *p;
54221+
54222+ if (unlikely(!grsec_enable_chroot_unix))
54223+ return 1;
54224+
54225+ if (likely(!proc_is_chrooted(current)))
54226+ return 1;
54227+
54228+ rcu_read_lock();
54229+ read_lock(&tasklist_lock);
54230+ p = find_task_by_vpid_unrestricted(pid);
54231+ if (unlikely(p && !have_same_root(current, p))) {
54232+ read_unlock(&tasklist_lock);
54233+ rcu_read_unlock();
54234+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
54235+ return 0;
54236+ }
54237+ read_unlock(&tasklist_lock);
54238+ rcu_read_unlock();
54239+#endif
54240+ return 1;
54241+}
54242+
54243+int
54244+gr_handle_chroot_nice(void)
54245+{
54246+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54247+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
54248+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
54249+ return -EPERM;
54250+ }
54251+#endif
54252+ return 0;
54253+}
54254+
54255+int
54256+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
54257+{
54258+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54259+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
54260+ && proc_is_chrooted(current)) {
54261+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
54262+ return -EACCES;
54263+ }
54264+#endif
54265+ return 0;
54266+}
54267+
54268+int
54269+gr_handle_chroot_rawio(const struct inode *inode)
54270+{
54271+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54272+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
54273+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
54274+ return 1;
54275+#endif
54276+ return 0;
54277+}
54278+
54279+int
54280+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
54281+{
54282+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54283+ struct task_struct *p;
54284+ int ret = 0;
54285+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
54286+ return ret;
54287+
54288+ read_lock(&tasklist_lock);
54289+ do_each_pid_task(pid, type, p) {
54290+ if (!have_same_root(current, p)) {
54291+ ret = 1;
54292+ goto out;
54293+ }
54294+ } while_each_pid_task(pid, type, p);
54295+out:
54296+ read_unlock(&tasklist_lock);
54297+ return ret;
54298+#endif
54299+ return 0;
54300+}
54301+
54302+int
54303+gr_pid_is_chrooted(struct task_struct *p)
54304+{
54305+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54306+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
54307+ return 0;
54308+
54309+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
54310+ !have_same_root(current, p)) {
54311+ return 1;
54312+ }
54313+#endif
54314+ return 0;
54315+}
54316+
54317+EXPORT_SYMBOL(gr_pid_is_chrooted);
54318+
54319+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
54320+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
54321+{
54322+ struct path path, currentroot;
54323+ int ret = 0;
54324+
54325+ path.dentry = (struct dentry *)u_dentry;
54326+ path.mnt = (struct vfsmount *)u_mnt;
54327+ get_fs_root(current->fs, &currentroot);
54328+ if (path_is_under(&path, &currentroot))
54329+ ret = 1;
54330+ path_put(&currentroot);
54331+
54332+ return ret;
54333+}
54334+#endif
54335+
54336+int
54337+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
54338+{
54339+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54340+ if (!grsec_enable_chroot_fchdir)
54341+ return 1;
54342+
54343+ if (!proc_is_chrooted(current))
54344+ return 1;
54345+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
54346+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
54347+ return 0;
54348+ }
54349+#endif
54350+ return 1;
54351+}
54352+
54353+int
54354+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54355+ const time_t shm_createtime)
54356+{
54357+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54358+ struct task_struct *p;
54359+ time_t starttime;
54360+
54361+ if (unlikely(!grsec_enable_chroot_shmat))
54362+ return 1;
54363+
54364+ if (likely(!proc_is_chrooted(current)))
54365+ return 1;
54366+
54367+ rcu_read_lock();
54368+ read_lock(&tasklist_lock);
54369+
54370+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
54371+ starttime = p->start_time.tv_sec;
54372+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
54373+ if (have_same_root(current, p)) {
54374+ goto allow;
54375+ } else {
54376+ read_unlock(&tasklist_lock);
54377+ rcu_read_unlock();
54378+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54379+ return 0;
54380+ }
54381+ }
54382+ /* creator exited, pid reuse, fall through to next check */
54383+ }
54384+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
54385+ if (unlikely(!have_same_root(current, p))) {
54386+ read_unlock(&tasklist_lock);
54387+ rcu_read_unlock();
54388+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
54389+ return 0;
54390+ }
54391+ }
54392+
54393+allow:
54394+ read_unlock(&tasklist_lock);
54395+ rcu_read_unlock();
54396+#endif
54397+ return 1;
54398+}
54399+
54400+void
54401+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
54402+{
54403+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54404+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
54405+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
54406+#endif
54407+ return;
54408+}
54409+
54410+int
54411+gr_handle_chroot_mknod(const struct dentry *dentry,
54412+ const struct vfsmount *mnt, const int mode)
54413+{
54414+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54415+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
54416+ proc_is_chrooted(current)) {
54417+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
54418+ return -EPERM;
54419+ }
54420+#endif
54421+ return 0;
54422+}
54423+
54424+int
54425+gr_handle_chroot_mount(const struct dentry *dentry,
54426+ const struct vfsmount *mnt, const char *dev_name)
54427+{
54428+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54429+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
54430+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
54431+ return -EPERM;
54432+ }
54433+#endif
54434+ return 0;
54435+}
54436+
54437+int
54438+gr_handle_chroot_pivot(void)
54439+{
54440+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54441+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
54442+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
54443+ return -EPERM;
54444+ }
54445+#endif
54446+ return 0;
54447+}
54448+
54449+int
54450+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
54451+{
54452+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54453+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
54454+ !gr_is_outside_chroot(dentry, mnt)) {
54455+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
54456+ return -EPERM;
54457+ }
54458+#endif
54459+ return 0;
54460+}
54461+
54462+extern const char *captab_log[];
54463+extern int captab_log_entries;
54464+
54465+int
54466+gr_chroot_is_capable(const int cap)
54467+{
54468+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54469+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54470+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54471+ if (cap_raised(chroot_caps, cap)) {
54472+ const struct cred *creds = current_cred();
54473+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
54474+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
54475+ }
54476+ return 0;
54477+ }
54478+ }
54479+#endif
54480+ return 1;
54481+}
54482+
54483+int
54484+gr_chroot_is_capable_nolog(const int cap)
54485+{
54486+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54487+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
54488+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
54489+ if (cap_raised(chroot_caps, cap)) {
54490+ return 0;
54491+ }
54492+ }
54493+#endif
54494+ return 1;
54495+}
54496+
54497+int
54498+gr_handle_chroot_sysctl(const int op)
54499+{
54500+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54501+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
54502+ proc_is_chrooted(current))
54503+ return -EACCES;
54504+#endif
54505+ return 0;
54506+}
54507+
54508+void
54509+gr_handle_chroot_chdir(struct path *path)
54510+{
54511+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54512+ if (grsec_enable_chroot_chdir)
54513+ set_fs_pwd(current->fs, path);
54514+#endif
54515+ return;
54516+}
54517+
54518+int
54519+gr_handle_chroot_chmod(const struct dentry *dentry,
54520+ const struct vfsmount *mnt, const int mode)
54521+{
54522+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54523+ /* allow chmod +s on directories, but not files */
54524+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
54525+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
54526+ proc_is_chrooted(current)) {
54527+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
54528+ return -EPERM;
54529+ }
54530+#endif
54531+ return 0;
54532+}
54533diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
54534new file mode 100644
54535index 0000000..d81a586
54536--- /dev/null
54537+++ b/grsecurity/grsec_disabled.c
54538@@ -0,0 +1,439 @@
54539+#include <linux/kernel.h>
54540+#include <linux/module.h>
54541+#include <linux/sched.h>
54542+#include <linux/file.h>
54543+#include <linux/fs.h>
54544+#include <linux/kdev_t.h>
54545+#include <linux/net.h>
54546+#include <linux/in.h>
54547+#include <linux/ip.h>
54548+#include <linux/skbuff.h>
54549+#include <linux/sysctl.h>
54550+
54551+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54552+void
54553+pax_set_initial_flags(struct linux_binprm *bprm)
54554+{
54555+ return;
54556+}
54557+#endif
54558+
54559+#ifdef CONFIG_SYSCTL
54560+__u32
54561+gr_handle_sysctl(const struct ctl_table * table, const int op)
54562+{
54563+ return 0;
54564+}
54565+#endif
54566+
54567+#ifdef CONFIG_TASKSTATS
54568+int gr_is_taskstats_denied(int pid)
54569+{
54570+ return 0;
54571+}
54572+#endif
54573+
54574+int
54575+gr_acl_is_enabled(void)
54576+{
54577+ return 0;
54578+}
54579+
54580+void
54581+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54582+{
54583+ return;
54584+}
54585+
54586+int
54587+gr_handle_rawio(const struct inode *inode)
54588+{
54589+ return 0;
54590+}
54591+
54592+void
54593+gr_acl_handle_psacct(struct task_struct *task, const long code)
54594+{
54595+ return;
54596+}
54597+
54598+int
54599+gr_handle_ptrace(struct task_struct *task, const long request)
54600+{
54601+ return 0;
54602+}
54603+
54604+int
54605+gr_handle_proc_ptrace(struct task_struct *task)
54606+{
54607+ return 0;
54608+}
54609+
54610+void
54611+gr_learn_resource(const struct task_struct *task,
54612+ const int res, const unsigned long wanted, const int gt)
54613+{
54614+ return;
54615+}
54616+
54617+int
54618+gr_set_acls(const int type)
54619+{
54620+ return 0;
54621+}
54622+
54623+int
54624+gr_check_hidden_task(const struct task_struct *tsk)
54625+{
54626+ return 0;
54627+}
54628+
54629+int
54630+gr_check_protected_task(const struct task_struct *task)
54631+{
54632+ return 0;
54633+}
54634+
54635+int
54636+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
54637+{
54638+ return 0;
54639+}
54640+
54641+void
54642+gr_copy_label(struct task_struct *tsk)
54643+{
54644+ return;
54645+}
54646+
54647+void
54648+gr_set_pax_flags(struct task_struct *task)
54649+{
54650+ return;
54651+}
54652+
54653+int
54654+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
54655+ const int unsafe_share)
54656+{
54657+ return 0;
54658+}
54659+
54660+void
54661+gr_handle_delete(const ino_t ino, const dev_t dev)
54662+{
54663+ return;
54664+}
54665+
54666+void
54667+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54668+{
54669+ return;
54670+}
54671+
54672+void
54673+gr_handle_crash(struct task_struct *task, const int sig)
54674+{
54675+ return;
54676+}
54677+
54678+int
54679+gr_check_crash_exec(const struct file *filp)
54680+{
54681+ return 0;
54682+}
54683+
54684+int
54685+gr_check_crash_uid(const uid_t uid)
54686+{
54687+ return 0;
54688+}
54689+
54690+void
54691+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54692+ struct dentry *old_dentry,
54693+ struct dentry *new_dentry,
54694+ struct vfsmount *mnt, const __u8 replace)
54695+{
54696+ return;
54697+}
54698+
54699+int
54700+gr_search_socket(const int family, const int type, const int protocol)
54701+{
54702+ return 1;
54703+}
54704+
54705+int
54706+gr_search_connectbind(const int mode, const struct socket *sock,
54707+ const struct sockaddr_in *addr)
54708+{
54709+ return 0;
54710+}
54711+
54712+void
54713+gr_handle_alertkill(struct task_struct *task)
54714+{
54715+ return;
54716+}
54717+
54718+__u32
54719+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
54720+{
54721+ return 1;
54722+}
54723+
54724+__u32
54725+gr_acl_handle_hidden_file(const struct dentry * dentry,
54726+ const struct vfsmount * mnt)
54727+{
54728+ return 1;
54729+}
54730+
54731+__u32
54732+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54733+ int acc_mode)
54734+{
54735+ return 1;
54736+}
54737+
54738+__u32
54739+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54740+{
54741+ return 1;
54742+}
54743+
54744+__u32
54745+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
54746+{
54747+ return 1;
54748+}
54749+
54750+int
54751+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
54752+ unsigned int *vm_flags)
54753+{
54754+ return 1;
54755+}
54756+
54757+__u32
54758+gr_acl_handle_truncate(const struct dentry * dentry,
54759+ const struct vfsmount * mnt)
54760+{
54761+ return 1;
54762+}
54763+
54764+__u32
54765+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
54766+{
54767+ return 1;
54768+}
54769+
54770+__u32
54771+gr_acl_handle_access(const struct dentry * dentry,
54772+ const struct vfsmount * mnt, const int fmode)
54773+{
54774+ return 1;
54775+}
54776+
54777+__u32
54778+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
54779+ mode_t mode)
54780+{
54781+ return 1;
54782+}
54783+
54784+__u32
54785+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
54786+ mode_t mode)
54787+{
54788+ return 1;
54789+}
54790+
54791+__u32
54792+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
54793+{
54794+ return 1;
54795+}
54796+
54797+__u32
54798+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
54799+{
54800+ return 1;
54801+}
54802+
54803+void
54804+grsecurity_init(void)
54805+{
54806+ return;
54807+}
54808+
54809+__u32
54810+gr_acl_handle_mknod(const struct dentry * new_dentry,
54811+ const struct dentry * parent_dentry,
54812+ const struct vfsmount * parent_mnt,
54813+ const int mode)
54814+{
54815+ return 1;
54816+}
54817+
54818+__u32
54819+gr_acl_handle_mkdir(const struct dentry * new_dentry,
54820+ const struct dentry * parent_dentry,
54821+ const struct vfsmount * parent_mnt)
54822+{
54823+ return 1;
54824+}
54825+
54826+__u32
54827+gr_acl_handle_symlink(const struct dentry * new_dentry,
54828+ const struct dentry * parent_dentry,
54829+ const struct vfsmount * parent_mnt, const char *from)
54830+{
54831+ return 1;
54832+}
54833+
54834+__u32
54835+gr_acl_handle_link(const struct dentry * new_dentry,
54836+ const struct dentry * parent_dentry,
54837+ const struct vfsmount * parent_mnt,
54838+ const struct dentry * old_dentry,
54839+ const struct vfsmount * old_mnt, const char *to)
54840+{
54841+ return 1;
54842+}
54843+
54844+int
54845+gr_acl_handle_rename(const struct dentry *new_dentry,
54846+ const struct dentry *parent_dentry,
54847+ const struct vfsmount *parent_mnt,
54848+ const struct dentry *old_dentry,
54849+ const struct inode *old_parent_inode,
54850+ const struct vfsmount *old_mnt, const char *newname)
54851+{
54852+ return 0;
54853+}
54854+
54855+int
54856+gr_acl_handle_filldir(const struct file *file, const char *name,
54857+ const int namelen, const ino_t ino)
54858+{
54859+ return 1;
54860+}
54861+
54862+int
54863+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
54864+ const time_t shm_createtime, const uid_t cuid, const int shmid)
54865+{
54866+ return 1;
54867+}
54868+
54869+int
54870+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
54871+{
54872+ return 0;
54873+}
54874+
54875+int
54876+gr_search_accept(const struct socket *sock)
54877+{
54878+ return 0;
54879+}
54880+
54881+int
54882+gr_search_listen(const struct socket *sock)
54883+{
54884+ return 0;
54885+}
54886+
54887+int
54888+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
54889+{
54890+ return 0;
54891+}
54892+
54893+__u32
54894+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
54895+{
54896+ return 1;
54897+}
54898+
54899+__u32
54900+gr_acl_handle_creat(const struct dentry * dentry,
54901+ const struct dentry * p_dentry,
54902+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54903+ const int imode)
54904+{
54905+ return 1;
54906+}
54907+
54908+void
54909+gr_acl_handle_exit(void)
54910+{
54911+ return;
54912+}
54913+
54914+int
54915+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54916+{
54917+ return 1;
54918+}
54919+
54920+void
54921+gr_set_role_label(const uid_t uid, const gid_t gid)
54922+{
54923+ return;
54924+}
54925+
54926+int
54927+gr_acl_handle_procpidmem(const struct task_struct *task)
54928+{
54929+ return 0;
54930+}
54931+
54932+int
54933+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
54934+{
54935+ return 0;
54936+}
54937+
54938+int
54939+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
54940+{
54941+ return 0;
54942+}
54943+
54944+void
54945+gr_set_kernel_label(struct task_struct *task)
54946+{
54947+ return;
54948+}
54949+
54950+int
54951+gr_check_user_change(int real, int effective, int fs)
54952+{
54953+ return 0;
54954+}
54955+
54956+int
54957+gr_check_group_change(int real, int effective, int fs)
54958+{
54959+ return 0;
54960+}
54961+
54962+int gr_acl_enable_at_secure(void)
54963+{
54964+ return 0;
54965+}
54966+
54967+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
54968+{
54969+ return dentry->d_inode->i_sb->s_dev;
54970+}
54971+
54972+EXPORT_SYMBOL(gr_learn_resource);
54973+EXPORT_SYMBOL(gr_set_kernel_label);
54974+#ifdef CONFIG_SECURITY
54975+EXPORT_SYMBOL(gr_check_user_change);
54976+EXPORT_SYMBOL(gr_check_group_change);
54977+#endif
54978diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
54979new file mode 100644
54980index 0000000..2b05ada
54981--- /dev/null
54982+++ b/grsecurity/grsec_exec.c
54983@@ -0,0 +1,146 @@
54984+#include <linux/kernel.h>
54985+#include <linux/sched.h>
54986+#include <linux/file.h>
54987+#include <linux/binfmts.h>
54988+#include <linux/fs.h>
54989+#include <linux/types.h>
54990+#include <linux/grdefs.h>
54991+#include <linux/grsecurity.h>
54992+#include <linux/grinternal.h>
54993+#include <linux/capability.h>
54994+#include <linux/module.h>
54995+
54996+#include <asm/uaccess.h>
54997+
54998+#ifdef CONFIG_GRKERNSEC_EXECLOG
54999+static char gr_exec_arg_buf[132];
55000+static DEFINE_MUTEX(gr_exec_arg_mutex);
55001+#endif
55002+
55003+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
55004+
55005+void
55006+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
55007+{
55008+#ifdef CONFIG_GRKERNSEC_EXECLOG
55009+ char *grarg = gr_exec_arg_buf;
55010+ unsigned int i, x, execlen = 0;
55011+ char c;
55012+
55013+ if (!((grsec_enable_execlog && grsec_enable_group &&
55014+ in_group_p(grsec_audit_gid))
55015+ || (grsec_enable_execlog && !grsec_enable_group)))
55016+ return;
55017+
55018+ mutex_lock(&gr_exec_arg_mutex);
55019+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
55020+
55021+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
55022+ const char __user *p;
55023+ unsigned int len;
55024+
55025+ p = get_user_arg_ptr(argv, i);
55026+ if (IS_ERR(p))
55027+ goto log;
55028+
55029+ len = strnlen_user(p, 128 - execlen);
55030+ if (len > 128 - execlen)
55031+ len = 128 - execlen;
55032+ else if (len > 0)
55033+ len--;
55034+ if (copy_from_user(grarg + execlen, p, len))
55035+ goto log;
55036+
55037+ /* rewrite unprintable characters */
55038+ for (x = 0; x < len; x++) {
55039+ c = *(grarg + execlen + x);
55040+ if (c < 32 || c > 126)
55041+ *(grarg + execlen + x) = ' ';
55042+ }
55043+
55044+ execlen += len;
55045+ *(grarg + execlen) = ' ';
55046+ *(grarg + execlen + 1) = '\0';
55047+ execlen++;
55048+ }
55049+
55050+ log:
55051+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
55052+ bprm->file->f_path.mnt, grarg);
55053+ mutex_unlock(&gr_exec_arg_mutex);
55054+#endif
55055+ return;
55056+}
55057+
55058+#ifdef CONFIG_GRKERNSEC
55059+extern int gr_acl_is_capable(const int cap);
55060+extern int gr_acl_is_capable_nolog(const int cap);
55061+extern int gr_chroot_is_capable(const int cap);
55062+extern int gr_chroot_is_capable_nolog(const int cap);
55063+#endif
55064+
55065+const char *captab_log[] = {
55066+ "CAP_CHOWN",
55067+ "CAP_DAC_OVERRIDE",
55068+ "CAP_DAC_READ_SEARCH",
55069+ "CAP_FOWNER",
55070+ "CAP_FSETID",
55071+ "CAP_KILL",
55072+ "CAP_SETGID",
55073+ "CAP_SETUID",
55074+ "CAP_SETPCAP",
55075+ "CAP_LINUX_IMMUTABLE",
55076+ "CAP_NET_BIND_SERVICE",
55077+ "CAP_NET_BROADCAST",
55078+ "CAP_NET_ADMIN",
55079+ "CAP_NET_RAW",
55080+ "CAP_IPC_LOCK",
55081+ "CAP_IPC_OWNER",
55082+ "CAP_SYS_MODULE",
55083+ "CAP_SYS_RAWIO",
55084+ "CAP_SYS_CHROOT",
55085+ "CAP_SYS_PTRACE",
55086+ "CAP_SYS_PACCT",
55087+ "CAP_SYS_ADMIN",
55088+ "CAP_SYS_BOOT",
55089+ "CAP_SYS_NICE",
55090+ "CAP_SYS_RESOURCE",
55091+ "CAP_SYS_TIME",
55092+ "CAP_SYS_TTY_CONFIG",
55093+ "CAP_MKNOD",
55094+ "CAP_LEASE",
55095+ "CAP_AUDIT_WRITE",
55096+ "CAP_AUDIT_CONTROL",
55097+ "CAP_SETFCAP",
55098+ "CAP_MAC_OVERRIDE",
55099+ "CAP_MAC_ADMIN",
55100+ "CAP_SYSLOG",
55101+ "CAP_WAKE_ALARM"
55102+};
55103+
55104+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
55105+
55106+int gr_is_capable(const int cap)
55107+{
55108+#ifdef CONFIG_GRKERNSEC
55109+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
55110+ return 1;
55111+ return 0;
55112+#else
55113+ return 1;
55114+#endif
55115+}
55116+
55117+int gr_is_capable_nolog(const int cap)
55118+{
55119+#ifdef CONFIG_GRKERNSEC
55120+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
55121+ return 1;
55122+ return 0;
55123+#else
55124+ return 1;
55125+#endif
55126+}
55127+
55128+EXPORT_SYMBOL(gr_is_capable);
55129+EXPORT_SYMBOL(gr_is_capable_nolog);
55130diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
55131new file mode 100644
55132index 0000000..d3ee748
55133--- /dev/null
55134+++ b/grsecurity/grsec_fifo.c
55135@@ -0,0 +1,24 @@
55136+#include <linux/kernel.h>
55137+#include <linux/sched.h>
55138+#include <linux/fs.h>
55139+#include <linux/file.h>
55140+#include <linux/grinternal.h>
55141+
55142+int
55143+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
55144+ const struct dentry *dir, const int flag, const int acc_mode)
55145+{
55146+#ifdef CONFIG_GRKERNSEC_FIFO
55147+ const struct cred *cred = current_cred();
55148+
55149+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
55150+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
55151+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
55152+ (cred->fsuid != dentry->d_inode->i_uid)) {
55153+ if (!inode_permission(dentry->d_inode, acc_mode))
55154+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
55155+ return -EACCES;
55156+ }
55157+#endif
55158+ return 0;
55159+}
55160diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
55161new file mode 100644
55162index 0000000..8ca18bf
55163--- /dev/null
55164+++ b/grsecurity/grsec_fork.c
55165@@ -0,0 +1,23 @@
55166+#include <linux/kernel.h>
55167+#include <linux/sched.h>
55168+#include <linux/grsecurity.h>
55169+#include <linux/grinternal.h>
55170+#include <linux/errno.h>
55171+
55172+void
55173+gr_log_forkfail(const int retval)
55174+{
55175+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55176+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
55177+ switch (retval) {
55178+ case -EAGAIN:
55179+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
55180+ break;
55181+ case -ENOMEM:
55182+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
55183+ break;
55184+ }
55185+ }
55186+#endif
55187+ return;
55188+}
55189diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
55190new file mode 100644
55191index 0000000..01ddde4
55192--- /dev/null
55193+++ b/grsecurity/grsec_init.c
55194@@ -0,0 +1,277 @@
55195+#include <linux/kernel.h>
55196+#include <linux/sched.h>
55197+#include <linux/mm.h>
55198+#include <linux/gracl.h>
55199+#include <linux/slab.h>
55200+#include <linux/vmalloc.h>
55201+#include <linux/percpu.h>
55202+#include <linux/module.h>
55203+
55204+int grsec_enable_ptrace_readexec;
55205+int grsec_enable_setxid;
55206+int grsec_enable_brute;
55207+int grsec_enable_link;
55208+int grsec_enable_dmesg;
55209+int grsec_enable_harden_ptrace;
55210+int grsec_enable_fifo;
55211+int grsec_enable_execlog;
55212+int grsec_enable_signal;
55213+int grsec_enable_forkfail;
55214+int grsec_enable_audit_ptrace;
55215+int grsec_enable_time;
55216+int grsec_enable_audit_textrel;
55217+int grsec_enable_group;
55218+int grsec_audit_gid;
55219+int grsec_enable_chdir;
55220+int grsec_enable_mount;
55221+int grsec_enable_rofs;
55222+int grsec_enable_chroot_findtask;
55223+int grsec_enable_chroot_mount;
55224+int grsec_enable_chroot_shmat;
55225+int grsec_enable_chroot_fchdir;
55226+int grsec_enable_chroot_double;
55227+int grsec_enable_chroot_pivot;
55228+int grsec_enable_chroot_chdir;
55229+int grsec_enable_chroot_chmod;
55230+int grsec_enable_chroot_mknod;
55231+int grsec_enable_chroot_nice;
55232+int grsec_enable_chroot_execlog;
55233+int grsec_enable_chroot_caps;
55234+int grsec_enable_chroot_sysctl;
55235+int grsec_enable_chroot_unix;
55236+int grsec_enable_tpe;
55237+int grsec_tpe_gid;
55238+int grsec_enable_blackhole;
55239+#ifdef CONFIG_IPV6_MODULE
55240+EXPORT_SYMBOL(grsec_enable_blackhole);
55241+#endif
55242+int grsec_lastack_retries;
55243+int grsec_enable_tpe_all;
55244+int grsec_enable_tpe_invert;
55245+int grsec_enable_socket_all;
55246+int grsec_socket_all_gid;
55247+int grsec_enable_socket_client;
55248+int grsec_socket_client_gid;
55249+int grsec_enable_socket_server;
55250+int grsec_socket_server_gid;
55251+int grsec_resource_logging;
55252+int grsec_disable_privio;
55253+int grsec_enable_log_rwxmaps;
55254+int grsec_lock;
55255+
55256+DEFINE_SPINLOCK(grsec_alert_lock);
55257+unsigned long grsec_alert_wtime = 0;
55258+unsigned long grsec_alert_fyet = 0;
55259+
55260+DEFINE_SPINLOCK(grsec_audit_lock);
55261+
55262+DEFINE_RWLOCK(grsec_exec_file_lock);
55263+
55264+char *gr_shared_page[4];
55265+
55266+char *gr_alert_log_fmt;
55267+char *gr_audit_log_fmt;
55268+char *gr_alert_log_buf;
55269+char *gr_audit_log_buf;
55270+
55271+extern struct gr_arg *gr_usermode;
55272+extern unsigned char *gr_system_salt;
55273+extern unsigned char *gr_system_sum;
55274+
55275+void __init
55276+grsecurity_init(void)
55277+{
55278+ int j;
55279+ /* create the per-cpu shared pages */
55280+
55281+#ifdef CONFIG_X86
55282+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
55283+#endif
55284+
55285+ for (j = 0; j < 4; j++) {
55286+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
55287+ if (gr_shared_page[j] == NULL) {
55288+ panic("Unable to allocate grsecurity shared page");
55289+ return;
55290+ }
55291+ }
55292+
55293+ /* allocate log buffers */
55294+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
55295+ if (!gr_alert_log_fmt) {
55296+ panic("Unable to allocate grsecurity alert log format buffer");
55297+ return;
55298+ }
55299+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
55300+ if (!gr_audit_log_fmt) {
55301+ panic("Unable to allocate grsecurity audit log format buffer");
55302+ return;
55303+ }
55304+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55305+ if (!gr_alert_log_buf) {
55306+ panic("Unable to allocate grsecurity alert log buffer");
55307+ return;
55308+ }
55309+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
55310+ if (!gr_audit_log_buf) {
55311+ panic("Unable to allocate grsecurity audit log buffer");
55312+ return;
55313+ }
55314+
55315+ /* allocate memory for authentication structure */
55316+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
55317+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
55318+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
55319+
55320+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
55321+ panic("Unable to allocate grsecurity authentication structure");
55322+ return;
55323+ }
55324+
55325+
55326+#ifdef CONFIG_GRKERNSEC_IO
55327+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
55328+ grsec_disable_privio = 1;
55329+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55330+ grsec_disable_privio = 1;
55331+#else
55332+ grsec_disable_privio = 0;
55333+#endif
55334+#endif
55335+
55336+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55337+ /* for backward compatibility, tpe_invert always defaults to on if
55338+ enabled in the kernel
55339+ */
55340+ grsec_enable_tpe_invert = 1;
55341+#endif
55342+
55343+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
55344+#ifndef CONFIG_GRKERNSEC_SYSCTL
55345+ grsec_lock = 1;
55346+#endif
55347+
55348+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55349+ grsec_enable_audit_textrel = 1;
55350+#endif
55351+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55352+ grsec_enable_log_rwxmaps = 1;
55353+#endif
55354+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55355+ grsec_enable_group = 1;
55356+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
55357+#endif
55358+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
55359+ grsec_enable_ptrace_readexec = 1;
55360+#endif
55361+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55362+ grsec_enable_chdir = 1;
55363+#endif
55364+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55365+ grsec_enable_harden_ptrace = 1;
55366+#endif
55367+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55368+ grsec_enable_mount = 1;
55369+#endif
55370+#ifdef CONFIG_GRKERNSEC_LINK
55371+ grsec_enable_link = 1;
55372+#endif
55373+#ifdef CONFIG_GRKERNSEC_BRUTE
55374+ grsec_enable_brute = 1;
55375+#endif
55376+#ifdef CONFIG_GRKERNSEC_DMESG
55377+ grsec_enable_dmesg = 1;
55378+#endif
55379+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55380+ grsec_enable_blackhole = 1;
55381+ grsec_lastack_retries = 4;
55382+#endif
55383+#ifdef CONFIG_GRKERNSEC_FIFO
55384+ grsec_enable_fifo = 1;
55385+#endif
55386+#ifdef CONFIG_GRKERNSEC_EXECLOG
55387+ grsec_enable_execlog = 1;
55388+#endif
55389+#ifdef CONFIG_GRKERNSEC_SETXID
55390+ grsec_enable_setxid = 1;
55391+#endif
55392+#ifdef CONFIG_GRKERNSEC_SIGNAL
55393+ grsec_enable_signal = 1;
55394+#endif
55395+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55396+ grsec_enable_forkfail = 1;
55397+#endif
55398+#ifdef CONFIG_GRKERNSEC_TIME
55399+ grsec_enable_time = 1;
55400+#endif
55401+#ifdef CONFIG_GRKERNSEC_RESLOG
55402+ grsec_resource_logging = 1;
55403+#endif
55404+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55405+ grsec_enable_chroot_findtask = 1;
55406+#endif
55407+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55408+ grsec_enable_chroot_unix = 1;
55409+#endif
55410+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55411+ grsec_enable_chroot_mount = 1;
55412+#endif
55413+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55414+ grsec_enable_chroot_fchdir = 1;
55415+#endif
55416+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55417+ grsec_enable_chroot_shmat = 1;
55418+#endif
55419+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55420+ grsec_enable_audit_ptrace = 1;
55421+#endif
55422+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55423+ grsec_enable_chroot_double = 1;
55424+#endif
55425+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55426+ grsec_enable_chroot_pivot = 1;
55427+#endif
55428+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55429+ grsec_enable_chroot_chdir = 1;
55430+#endif
55431+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55432+ grsec_enable_chroot_chmod = 1;
55433+#endif
55434+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55435+ grsec_enable_chroot_mknod = 1;
55436+#endif
55437+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55438+ grsec_enable_chroot_nice = 1;
55439+#endif
55440+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55441+ grsec_enable_chroot_execlog = 1;
55442+#endif
55443+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55444+ grsec_enable_chroot_caps = 1;
55445+#endif
55446+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55447+ grsec_enable_chroot_sysctl = 1;
55448+#endif
55449+#ifdef CONFIG_GRKERNSEC_TPE
55450+ grsec_enable_tpe = 1;
55451+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
55452+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55453+ grsec_enable_tpe_all = 1;
55454+#endif
55455+#endif
55456+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55457+ grsec_enable_socket_all = 1;
55458+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
55459+#endif
55460+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55461+ grsec_enable_socket_client = 1;
55462+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
55463+#endif
55464+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55465+ grsec_enable_socket_server = 1;
55466+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
55467+#endif
55468+#endif
55469+
55470+ return;
55471+}
55472diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
55473new file mode 100644
55474index 0000000..3efe141
55475--- /dev/null
55476+++ b/grsecurity/grsec_link.c
55477@@ -0,0 +1,43 @@
55478+#include <linux/kernel.h>
55479+#include <linux/sched.h>
55480+#include <linux/fs.h>
55481+#include <linux/file.h>
55482+#include <linux/grinternal.h>
55483+
55484+int
55485+gr_handle_follow_link(const struct inode *parent,
55486+ const struct inode *inode,
55487+ const struct dentry *dentry, const struct vfsmount *mnt)
55488+{
55489+#ifdef CONFIG_GRKERNSEC_LINK
55490+ const struct cred *cred = current_cred();
55491+
55492+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
55493+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
55494+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
55495+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
55496+ return -EACCES;
55497+ }
55498+#endif
55499+ return 0;
55500+}
55501+
55502+int
55503+gr_handle_hardlink(const struct dentry *dentry,
55504+ const struct vfsmount *mnt,
55505+ struct inode *inode, const int mode, const char *to)
55506+{
55507+#ifdef CONFIG_GRKERNSEC_LINK
55508+ const struct cred *cred = current_cred();
55509+
55510+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
55511+ (!S_ISREG(mode) || (mode & S_ISUID) ||
55512+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
55513+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
55514+ !capable(CAP_FOWNER) && cred->uid) {
55515+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
55516+ return -EPERM;
55517+ }
55518+#endif
55519+ return 0;
55520+}
55521diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
55522new file mode 100644
55523index 0000000..a45d2e9
55524--- /dev/null
55525+++ b/grsecurity/grsec_log.c
55526@@ -0,0 +1,322 @@
55527+#include <linux/kernel.h>
55528+#include <linux/sched.h>
55529+#include <linux/file.h>
55530+#include <linux/tty.h>
55531+#include <linux/fs.h>
55532+#include <linux/grinternal.h>
55533+
55534+#ifdef CONFIG_TREE_PREEMPT_RCU
55535+#define DISABLE_PREEMPT() preempt_disable()
55536+#define ENABLE_PREEMPT() preempt_enable()
55537+#else
55538+#define DISABLE_PREEMPT()
55539+#define ENABLE_PREEMPT()
55540+#endif
55541+
55542+#define BEGIN_LOCKS(x) \
55543+ DISABLE_PREEMPT(); \
55544+ rcu_read_lock(); \
55545+ read_lock(&tasklist_lock); \
55546+ read_lock(&grsec_exec_file_lock); \
55547+ if (x != GR_DO_AUDIT) \
55548+ spin_lock(&grsec_alert_lock); \
55549+ else \
55550+ spin_lock(&grsec_audit_lock)
55551+
55552+#define END_LOCKS(x) \
55553+ if (x != GR_DO_AUDIT) \
55554+ spin_unlock(&grsec_alert_lock); \
55555+ else \
55556+ spin_unlock(&grsec_audit_lock); \
55557+ read_unlock(&grsec_exec_file_lock); \
55558+ read_unlock(&tasklist_lock); \
55559+ rcu_read_unlock(); \
55560+ ENABLE_PREEMPT(); \
55561+ if (x == GR_DONT_AUDIT) \
55562+ gr_handle_alertkill(current)
55563+
55564+enum {
55565+ FLOODING,
55566+ NO_FLOODING
55567+};
55568+
55569+extern char *gr_alert_log_fmt;
55570+extern char *gr_audit_log_fmt;
55571+extern char *gr_alert_log_buf;
55572+extern char *gr_audit_log_buf;
55573+
55574+static int gr_log_start(int audit)
55575+{
55576+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
55577+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
55578+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55579+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
55580+ unsigned long curr_secs = get_seconds();
55581+
55582+ if (audit == GR_DO_AUDIT)
55583+ goto set_fmt;
55584+
55585+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
55586+ grsec_alert_wtime = curr_secs;
55587+ grsec_alert_fyet = 0;
55588+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
55589+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
55590+ grsec_alert_fyet++;
55591+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
55592+ grsec_alert_wtime = curr_secs;
55593+ grsec_alert_fyet++;
55594+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
55595+ return FLOODING;
55596+ }
55597+ else return FLOODING;
55598+
55599+set_fmt:
55600+#endif
55601+ memset(buf, 0, PAGE_SIZE);
55602+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
55603+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
55604+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55605+ } else if (current->signal->curr_ip) {
55606+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
55607+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
55608+ } else if (gr_acl_is_enabled()) {
55609+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
55610+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
55611+ } else {
55612+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
55613+ strcpy(buf, fmt);
55614+ }
55615+
55616+ return NO_FLOODING;
55617+}
55618+
55619+static void gr_log_middle(int audit, const char *msg, va_list ap)
55620+ __attribute__ ((format (printf, 2, 0)));
55621+
55622+static void gr_log_middle(int audit, const char *msg, va_list ap)
55623+{
55624+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55625+ unsigned int len = strlen(buf);
55626+
55627+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55628+
55629+ return;
55630+}
55631+
55632+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55633+ __attribute__ ((format (printf, 2, 3)));
55634+
55635+static void gr_log_middle_varargs(int audit, const char *msg, ...)
55636+{
55637+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55638+ unsigned int len = strlen(buf);
55639+ va_list ap;
55640+
55641+ va_start(ap, msg);
55642+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
55643+ va_end(ap);
55644+
55645+ return;
55646+}
55647+
55648+static void gr_log_end(int audit, int append_default)
55649+{
55650+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
55651+
55652+ if (append_default) {
55653+ unsigned int len = strlen(buf);
55654+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
55655+ }
55656+
55657+ printk("%s\n", buf);
55658+
55659+ return;
55660+}
55661+
55662+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
55663+{
55664+ int logtype;
55665+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
55666+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
55667+ void *voidptr = NULL;
55668+ int num1 = 0, num2 = 0;
55669+ unsigned long ulong1 = 0, ulong2 = 0;
55670+ struct dentry *dentry = NULL;
55671+ struct vfsmount *mnt = NULL;
55672+ struct file *file = NULL;
55673+ struct task_struct *task = NULL;
55674+ const struct cred *cred, *pcred;
55675+ va_list ap;
55676+
55677+ BEGIN_LOCKS(audit);
55678+ logtype = gr_log_start(audit);
55679+ if (logtype == FLOODING) {
55680+ END_LOCKS(audit);
55681+ return;
55682+ }
55683+ va_start(ap, argtypes);
55684+ switch (argtypes) {
55685+ case GR_TTYSNIFF:
55686+ task = va_arg(ap, struct task_struct *);
55687+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
55688+ break;
55689+ case GR_SYSCTL_HIDDEN:
55690+ str1 = va_arg(ap, char *);
55691+ gr_log_middle_varargs(audit, msg, result, str1);
55692+ break;
55693+ case GR_RBAC:
55694+ dentry = va_arg(ap, struct dentry *);
55695+ mnt = va_arg(ap, struct vfsmount *);
55696+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
55697+ break;
55698+ case GR_RBAC_STR:
55699+ dentry = va_arg(ap, struct dentry *);
55700+ mnt = va_arg(ap, struct vfsmount *);
55701+ str1 = va_arg(ap, char *);
55702+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
55703+ break;
55704+ case GR_STR_RBAC:
55705+ str1 = va_arg(ap, char *);
55706+ dentry = va_arg(ap, struct dentry *);
55707+ mnt = va_arg(ap, struct vfsmount *);
55708+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
55709+ break;
55710+ case GR_RBAC_MODE2:
55711+ dentry = va_arg(ap, struct dentry *);
55712+ mnt = va_arg(ap, struct vfsmount *);
55713+ str1 = va_arg(ap, char *);
55714+ str2 = va_arg(ap, char *);
55715+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
55716+ break;
55717+ case GR_RBAC_MODE3:
55718+ dentry = va_arg(ap, struct dentry *);
55719+ mnt = va_arg(ap, struct vfsmount *);
55720+ str1 = va_arg(ap, char *);
55721+ str2 = va_arg(ap, char *);
55722+ str3 = va_arg(ap, char *);
55723+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
55724+ break;
55725+ case GR_FILENAME:
55726+ dentry = va_arg(ap, struct dentry *);
55727+ mnt = va_arg(ap, struct vfsmount *);
55728+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
55729+ break;
55730+ case GR_STR_FILENAME:
55731+ str1 = va_arg(ap, char *);
55732+ dentry = va_arg(ap, struct dentry *);
55733+ mnt = va_arg(ap, struct vfsmount *);
55734+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
55735+ break;
55736+ case GR_FILENAME_STR:
55737+ dentry = va_arg(ap, struct dentry *);
55738+ mnt = va_arg(ap, struct vfsmount *);
55739+ str1 = va_arg(ap, char *);
55740+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
55741+ break;
55742+ case GR_FILENAME_TWO_INT:
55743+ dentry = va_arg(ap, struct dentry *);
55744+ mnt = va_arg(ap, struct vfsmount *);
55745+ num1 = va_arg(ap, int);
55746+ num2 = va_arg(ap, int);
55747+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
55748+ break;
55749+ case GR_FILENAME_TWO_INT_STR:
55750+ dentry = va_arg(ap, struct dentry *);
55751+ mnt = va_arg(ap, struct vfsmount *);
55752+ num1 = va_arg(ap, int);
55753+ num2 = va_arg(ap, int);
55754+ str1 = va_arg(ap, char *);
55755+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
55756+ break;
55757+ case GR_TEXTREL:
55758+ file = va_arg(ap, struct file *);
55759+ ulong1 = va_arg(ap, unsigned long);
55760+ ulong2 = va_arg(ap, unsigned long);
55761+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
55762+ break;
55763+ case GR_PTRACE:
55764+ task = va_arg(ap, struct task_struct *);
55765+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
55766+ break;
55767+ case GR_RESOURCE:
55768+ task = va_arg(ap, struct task_struct *);
55769+ cred = __task_cred(task);
55770+ pcred = __task_cred(task->real_parent);
55771+ ulong1 = va_arg(ap, unsigned long);
55772+ str1 = va_arg(ap, char *);
55773+ ulong2 = va_arg(ap, unsigned long);
55774+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55775+ break;
55776+ case GR_CAP:
55777+ task = va_arg(ap, struct task_struct *);
55778+ cred = __task_cred(task);
55779+ pcred = __task_cred(task->real_parent);
55780+ str1 = va_arg(ap, char *);
55781+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55782+ break;
55783+ case GR_SIG:
55784+ str1 = va_arg(ap, char *);
55785+ voidptr = va_arg(ap, void *);
55786+ gr_log_middle_varargs(audit, msg, str1, voidptr);
55787+ break;
55788+ case GR_SIG2:
55789+ task = va_arg(ap, struct task_struct *);
55790+ cred = __task_cred(task);
55791+ pcred = __task_cred(task->real_parent);
55792+ num1 = va_arg(ap, int);
55793+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55794+ break;
55795+ case GR_CRASH1:
55796+ task = va_arg(ap, struct task_struct *);
55797+ cred = __task_cred(task);
55798+ pcred = __task_cred(task->real_parent);
55799+ ulong1 = va_arg(ap, unsigned long);
55800+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
55801+ break;
55802+ case GR_CRASH2:
55803+ task = va_arg(ap, struct task_struct *);
55804+ cred = __task_cred(task);
55805+ pcred = __task_cred(task->real_parent);
55806+ ulong1 = va_arg(ap, unsigned long);
55807+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
55808+ break;
55809+ case GR_RWXMAP:
55810+ file = va_arg(ap, struct file *);
55811+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
55812+ break;
55813+ case GR_PSACCT:
55814+ {
55815+ unsigned int wday, cday;
55816+ __u8 whr, chr;
55817+ __u8 wmin, cmin;
55818+ __u8 wsec, csec;
55819+ char cur_tty[64] = { 0 };
55820+ char parent_tty[64] = { 0 };
55821+
55822+ task = va_arg(ap, struct task_struct *);
55823+ wday = va_arg(ap, unsigned int);
55824+ cday = va_arg(ap, unsigned int);
55825+ whr = va_arg(ap, int);
55826+ chr = va_arg(ap, int);
55827+ wmin = va_arg(ap, int);
55828+ cmin = va_arg(ap, int);
55829+ wsec = va_arg(ap, int);
55830+ csec = va_arg(ap, int);
55831+ ulong1 = va_arg(ap, unsigned long);
55832+ cred = __task_cred(task);
55833+ pcred = __task_cred(task->real_parent);
55834+
55835+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
55836+ }
55837+ break;
55838+ default:
55839+ gr_log_middle(audit, msg, ap);
55840+ }
55841+ va_end(ap);
55842+ // these don't need DEFAULTSECARGS printed on the end
55843+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
55844+ gr_log_end(audit, 0);
55845+ else
55846+ gr_log_end(audit, 1);
55847+ END_LOCKS(audit);
55848+}
55849diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
55850new file mode 100644
55851index 0000000..6c0416b
55852--- /dev/null
55853+++ b/grsecurity/grsec_mem.c
55854@@ -0,0 +1,33 @@
55855+#include <linux/kernel.h>
55856+#include <linux/sched.h>
55857+#include <linux/mm.h>
55858+#include <linux/mman.h>
55859+#include <linux/grinternal.h>
55860+
55861+void
55862+gr_handle_ioperm(void)
55863+{
55864+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
55865+ return;
55866+}
55867+
55868+void
55869+gr_handle_iopl(void)
55870+{
55871+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
55872+ return;
55873+}
55874+
55875+void
55876+gr_handle_mem_readwrite(u64 from, u64 to)
55877+{
55878+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
55879+ return;
55880+}
55881+
55882+void
55883+gr_handle_vm86(void)
55884+{
55885+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
55886+ return;
55887+}
55888diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
55889new file mode 100644
55890index 0000000..2131422
55891--- /dev/null
55892+++ b/grsecurity/grsec_mount.c
55893@@ -0,0 +1,62 @@
55894+#include <linux/kernel.h>
55895+#include <linux/sched.h>
55896+#include <linux/mount.h>
55897+#include <linux/grsecurity.h>
55898+#include <linux/grinternal.h>
55899+
55900+void
55901+gr_log_remount(const char *devname, const int retval)
55902+{
55903+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55904+ if (grsec_enable_mount && (retval >= 0))
55905+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
55906+#endif
55907+ return;
55908+}
55909+
55910+void
55911+gr_log_unmount(const char *devname, const int retval)
55912+{
55913+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55914+ if (grsec_enable_mount && (retval >= 0))
55915+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
55916+#endif
55917+ return;
55918+}
55919+
55920+void
55921+gr_log_mount(const char *from, const char *to, const int retval)
55922+{
55923+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55924+ if (grsec_enable_mount && (retval >= 0))
55925+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
55926+#endif
55927+ return;
55928+}
55929+
55930+int
55931+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
55932+{
55933+#ifdef CONFIG_GRKERNSEC_ROFS
55934+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
55935+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
55936+ return -EPERM;
55937+ } else
55938+ return 0;
55939+#endif
55940+ return 0;
55941+}
55942+
55943+int
55944+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
55945+{
55946+#ifdef CONFIG_GRKERNSEC_ROFS
55947+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
55948+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
55949+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
55950+ return -EPERM;
55951+ } else
55952+ return 0;
55953+#endif
55954+ return 0;
55955+}
55956diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
55957new file mode 100644
55958index 0000000..a3b12a0
55959--- /dev/null
55960+++ b/grsecurity/grsec_pax.c
55961@@ -0,0 +1,36 @@
55962+#include <linux/kernel.h>
55963+#include <linux/sched.h>
55964+#include <linux/mm.h>
55965+#include <linux/file.h>
55966+#include <linux/grinternal.h>
55967+#include <linux/grsecurity.h>
55968+
55969+void
55970+gr_log_textrel(struct vm_area_struct * vma)
55971+{
55972+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55973+ if (grsec_enable_audit_textrel)
55974+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
55975+#endif
55976+ return;
55977+}
55978+
55979+void
55980+gr_log_rwxmmap(struct file *file)
55981+{
55982+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55983+ if (grsec_enable_log_rwxmaps)
55984+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
55985+#endif
55986+ return;
55987+}
55988+
55989+void
55990+gr_log_rwxmprotect(struct file *file)
55991+{
55992+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55993+ if (grsec_enable_log_rwxmaps)
55994+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
55995+#endif
55996+ return;
55997+}
55998diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
55999new file mode 100644
56000index 0000000..f7f29aa
56001--- /dev/null
56002+++ b/grsecurity/grsec_ptrace.c
56003@@ -0,0 +1,30 @@
56004+#include <linux/kernel.h>
56005+#include <linux/sched.h>
56006+#include <linux/grinternal.h>
56007+#include <linux/security.h>
56008+
56009+void
56010+gr_audit_ptrace(struct task_struct *task)
56011+{
56012+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56013+ if (grsec_enable_audit_ptrace)
56014+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
56015+#endif
56016+ return;
56017+}
56018+
56019+int
56020+gr_ptrace_readexec(struct file *file, int unsafe_flags)
56021+{
56022+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56023+ const struct dentry *dentry = file->f_path.dentry;
56024+ const struct vfsmount *mnt = file->f_path.mnt;
56025+
56026+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
56027+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
56028+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
56029+ return -EACCES;
56030+ }
56031+#endif
56032+ return 0;
56033+}
56034diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
56035new file mode 100644
56036index 0000000..7a5b2de
56037--- /dev/null
56038+++ b/grsecurity/grsec_sig.c
56039@@ -0,0 +1,207 @@
56040+#include <linux/kernel.h>
56041+#include <linux/sched.h>
56042+#include <linux/delay.h>
56043+#include <linux/grsecurity.h>
56044+#include <linux/grinternal.h>
56045+#include <linux/hardirq.h>
56046+
56047+char *signames[] = {
56048+ [SIGSEGV] = "Segmentation fault",
56049+ [SIGILL] = "Illegal instruction",
56050+ [SIGABRT] = "Abort",
56051+ [SIGBUS] = "Invalid alignment/Bus error"
56052+};
56053+
56054+void
56055+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
56056+{
56057+#ifdef CONFIG_GRKERNSEC_SIGNAL
56058+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
56059+ (sig == SIGABRT) || (sig == SIGBUS))) {
56060+ if (t->pid == current->pid) {
56061+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
56062+ } else {
56063+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
56064+ }
56065+ }
56066+#endif
56067+ return;
56068+}
56069+
56070+int
56071+gr_handle_signal(const struct task_struct *p, const int sig)
56072+{
56073+#ifdef CONFIG_GRKERNSEC
56074+ /* ignore the 0 signal for protected task checks */
56075+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
56076+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
56077+ return -EPERM;
56078+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
56079+ return -EPERM;
56080+ }
56081+#endif
56082+ return 0;
56083+}
56084+
56085+#ifdef CONFIG_GRKERNSEC
56086+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
56087+
56088+int gr_fake_force_sig(int sig, struct task_struct *t)
56089+{
56090+ unsigned long int flags;
56091+ int ret, blocked, ignored;
56092+ struct k_sigaction *action;
56093+
56094+ spin_lock_irqsave(&t->sighand->siglock, flags);
56095+ action = &t->sighand->action[sig-1];
56096+ ignored = action->sa.sa_handler == SIG_IGN;
56097+ blocked = sigismember(&t->blocked, sig);
56098+ if (blocked || ignored) {
56099+ action->sa.sa_handler = SIG_DFL;
56100+ if (blocked) {
56101+ sigdelset(&t->blocked, sig);
56102+ recalc_sigpending_and_wake(t);
56103+ }
56104+ }
56105+ if (action->sa.sa_handler == SIG_DFL)
56106+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
56107+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
56108+
56109+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
56110+
56111+ return ret;
56112+}
56113+#endif
56114+
56115+#ifdef CONFIG_GRKERNSEC_BRUTE
56116+#define GR_USER_BAN_TIME (15 * 60)
56117+
56118+static int __get_dumpable(unsigned long mm_flags)
56119+{
56120+ int ret;
56121+
56122+ ret = mm_flags & MMF_DUMPABLE_MASK;
56123+ return (ret >= 2) ? 2 : ret;
56124+}
56125+#endif
56126+
56127+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
56128+{
56129+#ifdef CONFIG_GRKERNSEC_BRUTE
56130+ uid_t uid = 0;
56131+
56132+ if (!grsec_enable_brute)
56133+ return;
56134+
56135+ rcu_read_lock();
56136+ read_lock(&tasklist_lock);
56137+ read_lock(&grsec_exec_file_lock);
56138+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
56139+ p->real_parent->brute = 1;
56140+ else {
56141+ const struct cred *cred = __task_cred(p), *cred2;
56142+ struct task_struct *tsk, *tsk2;
56143+
56144+ if (!__get_dumpable(mm_flags) && cred->uid) {
56145+ struct user_struct *user;
56146+
56147+ uid = cred->uid;
56148+
56149+ /* this is put upon execution past expiration */
56150+ user = find_user(uid);
56151+ if (user == NULL)
56152+ goto unlock;
56153+ user->banned = 1;
56154+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
56155+ if (user->ban_expires == ~0UL)
56156+ user->ban_expires--;
56157+
56158+ do_each_thread(tsk2, tsk) {
56159+ cred2 = __task_cred(tsk);
56160+ if (tsk != p && cred2->uid == uid)
56161+ gr_fake_force_sig(SIGKILL, tsk);
56162+ } while_each_thread(tsk2, tsk);
56163+ }
56164+ }
56165+unlock:
56166+ read_unlock(&grsec_exec_file_lock);
56167+ read_unlock(&tasklist_lock);
56168+ rcu_read_unlock();
56169+
56170+ if (uid)
56171+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
56172+
56173+#endif
56174+ return;
56175+}
56176+
56177+void gr_handle_brute_check(void)
56178+{
56179+#ifdef CONFIG_GRKERNSEC_BRUTE
56180+ if (current->brute)
56181+ msleep(30 * 1000);
56182+#endif
56183+ return;
56184+}
56185+
56186+void gr_handle_kernel_exploit(void)
56187+{
56188+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
56189+ const struct cred *cred;
56190+ struct task_struct *tsk, *tsk2;
56191+ struct user_struct *user;
56192+ uid_t uid;
56193+
56194+ if (in_irq() || in_serving_softirq() || in_nmi())
56195+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
56196+
56197+ uid = current_uid();
56198+
56199+ if (uid == 0)
56200+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
56201+ else {
56202+ /* kill all the processes of this user, hold a reference
56203+ to their creds struct, and prevent them from creating
56204+ another process until system reset
56205+ */
56206+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
56207+ /* we intentionally leak this ref */
56208+ user = get_uid(current->cred->user);
56209+ if (user) {
56210+ user->banned = 1;
56211+ user->ban_expires = ~0UL;
56212+ }
56213+
56214+ read_lock(&tasklist_lock);
56215+ do_each_thread(tsk2, tsk) {
56216+ cred = __task_cred(tsk);
56217+ if (cred->uid == uid)
56218+ gr_fake_force_sig(SIGKILL, tsk);
56219+ } while_each_thread(tsk2, tsk);
56220+ read_unlock(&tasklist_lock);
56221+ }
56222+#endif
56223+}
56224+
56225+int __gr_process_user_ban(struct user_struct *user)
56226+{
56227+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56228+ if (unlikely(user->banned)) {
56229+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
56230+ user->banned = 0;
56231+ user->ban_expires = 0;
56232+ free_uid(user);
56233+ } else
56234+ return -EPERM;
56235+ }
56236+#endif
56237+ return 0;
56238+}
56239+
56240+int gr_process_user_ban(void)
56241+{
56242+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56243+ return __gr_process_user_ban(current->cred->user);
56244+#endif
56245+ return 0;
56246+}
56247diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
56248new file mode 100644
56249index 0000000..4030d57
56250--- /dev/null
56251+++ b/grsecurity/grsec_sock.c
56252@@ -0,0 +1,244 @@
56253+#include <linux/kernel.h>
56254+#include <linux/module.h>
56255+#include <linux/sched.h>
56256+#include <linux/file.h>
56257+#include <linux/net.h>
56258+#include <linux/in.h>
56259+#include <linux/ip.h>
56260+#include <net/sock.h>
56261+#include <net/inet_sock.h>
56262+#include <linux/grsecurity.h>
56263+#include <linux/grinternal.h>
56264+#include <linux/gracl.h>
56265+
56266+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
56267+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
56268+
56269+EXPORT_SYMBOL(gr_search_udp_recvmsg);
56270+EXPORT_SYMBOL(gr_search_udp_sendmsg);
56271+
56272+#ifdef CONFIG_UNIX_MODULE
56273+EXPORT_SYMBOL(gr_acl_handle_unix);
56274+EXPORT_SYMBOL(gr_acl_handle_mknod);
56275+EXPORT_SYMBOL(gr_handle_chroot_unix);
56276+EXPORT_SYMBOL(gr_handle_create);
56277+#endif
56278+
56279+#ifdef CONFIG_GRKERNSEC
56280+#define gr_conn_table_size 32749
56281+struct conn_table_entry {
56282+ struct conn_table_entry *next;
56283+ struct signal_struct *sig;
56284+};
56285+
56286+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
56287+DEFINE_SPINLOCK(gr_conn_table_lock);
56288+
56289+extern const char * gr_socktype_to_name(unsigned char type);
56290+extern const char * gr_proto_to_name(unsigned char proto);
56291+extern const char * gr_sockfamily_to_name(unsigned char family);
56292+
56293+static __inline__ int
56294+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
56295+{
56296+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
56297+}
56298+
56299+static __inline__ int
56300+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
56301+ __u16 sport, __u16 dport)
56302+{
56303+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
56304+ sig->gr_sport == sport && sig->gr_dport == dport))
56305+ return 1;
56306+ else
56307+ return 0;
56308+}
56309+
56310+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
56311+{
56312+ struct conn_table_entry **match;
56313+ unsigned int index;
56314+
56315+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56316+ sig->gr_sport, sig->gr_dport,
56317+ gr_conn_table_size);
56318+
56319+ newent->sig = sig;
56320+
56321+ match = &gr_conn_table[index];
56322+ newent->next = *match;
56323+ *match = newent;
56324+
56325+ return;
56326+}
56327+
56328+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
56329+{
56330+ struct conn_table_entry *match, *last = NULL;
56331+ unsigned int index;
56332+
56333+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
56334+ sig->gr_sport, sig->gr_dport,
56335+ gr_conn_table_size);
56336+
56337+ match = gr_conn_table[index];
56338+ while (match && !conn_match(match->sig,
56339+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
56340+ sig->gr_dport)) {
56341+ last = match;
56342+ match = match->next;
56343+ }
56344+
56345+ if (match) {
56346+ if (last)
56347+ last->next = match->next;
56348+ else
56349+ gr_conn_table[index] = NULL;
56350+ kfree(match);
56351+ }
56352+
56353+ return;
56354+}
56355+
56356+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
56357+ __u16 sport, __u16 dport)
56358+{
56359+ struct conn_table_entry *match;
56360+ unsigned int index;
56361+
56362+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
56363+
56364+ match = gr_conn_table[index];
56365+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
56366+ match = match->next;
56367+
56368+ if (match)
56369+ return match->sig;
56370+ else
56371+ return NULL;
56372+}
56373+
56374+#endif
56375+
56376+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
56377+{
56378+#ifdef CONFIG_GRKERNSEC
56379+ struct signal_struct *sig = task->signal;
56380+ struct conn_table_entry *newent;
56381+
56382+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
56383+ if (newent == NULL)
56384+ return;
56385+ /* no bh lock needed since we are called with bh disabled */
56386+ spin_lock(&gr_conn_table_lock);
56387+ gr_del_task_from_ip_table_nolock(sig);
56388+ sig->gr_saddr = inet->inet_rcv_saddr;
56389+ sig->gr_daddr = inet->inet_daddr;
56390+ sig->gr_sport = inet->inet_sport;
56391+ sig->gr_dport = inet->inet_dport;
56392+ gr_add_to_task_ip_table_nolock(sig, newent);
56393+ spin_unlock(&gr_conn_table_lock);
56394+#endif
56395+ return;
56396+}
56397+
56398+void gr_del_task_from_ip_table(struct task_struct *task)
56399+{
56400+#ifdef CONFIG_GRKERNSEC
56401+ spin_lock_bh(&gr_conn_table_lock);
56402+ gr_del_task_from_ip_table_nolock(task->signal);
56403+ spin_unlock_bh(&gr_conn_table_lock);
56404+#endif
56405+ return;
56406+}
56407+
56408+void
56409+gr_attach_curr_ip(const struct sock *sk)
56410+{
56411+#ifdef CONFIG_GRKERNSEC
56412+ struct signal_struct *p, *set;
56413+ const struct inet_sock *inet = inet_sk(sk);
56414+
56415+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
56416+ return;
56417+
56418+ set = current->signal;
56419+
56420+ spin_lock_bh(&gr_conn_table_lock);
56421+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
56422+ inet->inet_dport, inet->inet_sport);
56423+ if (unlikely(p != NULL)) {
56424+ set->curr_ip = p->curr_ip;
56425+ set->used_accept = 1;
56426+ gr_del_task_from_ip_table_nolock(p);
56427+ spin_unlock_bh(&gr_conn_table_lock);
56428+ return;
56429+ }
56430+ spin_unlock_bh(&gr_conn_table_lock);
56431+
56432+ set->curr_ip = inet->inet_daddr;
56433+ set->used_accept = 1;
56434+#endif
56435+ return;
56436+}
56437+
56438+int
56439+gr_handle_sock_all(const int family, const int type, const int protocol)
56440+{
56441+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56442+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
56443+ (family != AF_UNIX)) {
56444+ if (family == AF_INET)
56445+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
56446+ else
56447+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
56448+ return -EACCES;
56449+ }
56450+#endif
56451+ return 0;
56452+}
56453+
56454+int
56455+gr_handle_sock_server(const struct sockaddr *sck)
56456+{
56457+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56458+ if (grsec_enable_socket_server &&
56459+ in_group_p(grsec_socket_server_gid) &&
56460+ sck && (sck->sa_family != AF_UNIX) &&
56461+ (sck->sa_family != AF_LOCAL)) {
56462+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56463+ return -EACCES;
56464+ }
56465+#endif
56466+ return 0;
56467+}
56468+
56469+int
56470+gr_handle_sock_server_other(const struct sock *sck)
56471+{
56472+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56473+ if (grsec_enable_socket_server &&
56474+ in_group_p(grsec_socket_server_gid) &&
56475+ sck && (sck->sk_family != AF_UNIX) &&
56476+ (sck->sk_family != AF_LOCAL)) {
56477+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
56478+ return -EACCES;
56479+ }
56480+#endif
56481+ return 0;
56482+}
56483+
56484+int
56485+gr_handle_sock_client(const struct sockaddr *sck)
56486+{
56487+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56488+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
56489+ sck && (sck->sa_family != AF_UNIX) &&
56490+ (sck->sa_family != AF_LOCAL)) {
56491+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
56492+ return -EACCES;
56493+ }
56494+#endif
56495+ return 0;
56496+}
56497diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
56498new file mode 100644
56499index 0000000..a1aedd7
56500--- /dev/null
56501+++ b/grsecurity/grsec_sysctl.c
56502@@ -0,0 +1,451 @@
56503+#include <linux/kernel.h>
56504+#include <linux/sched.h>
56505+#include <linux/sysctl.h>
56506+#include <linux/grsecurity.h>
56507+#include <linux/grinternal.h>
56508+
56509+int
56510+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
56511+{
56512+#ifdef CONFIG_GRKERNSEC_SYSCTL
56513+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
56514+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
56515+ return -EACCES;
56516+ }
56517+#endif
56518+ return 0;
56519+}
56520+
56521+#ifdef CONFIG_GRKERNSEC_ROFS
56522+static int __maybe_unused one = 1;
56523+#endif
56524+
56525+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
56526+struct ctl_table grsecurity_table[] = {
56527+#ifdef CONFIG_GRKERNSEC_SYSCTL
56528+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
56529+#ifdef CONFIG_GRKERNSEC_IO
56530+ {
56531+ .procname = "disable_priv_io",
56532+ .data = &grsec_disable_privio,
56533+ .maxlen = sizeof(int),
56534+ .mode = 0600,
56535+ .proc_handler = &proc_dointvec,
56536+ },
56537+#endif
56538+#endif
56539+#ifdef CONFIG_GRKERNSEC_LINK
56540+ {
56541+ .procname = "linking_restrictions",
56542+ .data = &grsec_enable_link,
56543+ .maxlen = sizeof(int),
56544+ .mode = 0600,
56545+ .proc_handler = &proc_dointvec,
56546+ },
56547+#endif
56548+#ifdef CONFIG_GRKERNSEC_BRUTE
56549+ {
56550+ .procname = "deter_bruteforce",
56551+ .data = &grsec_enable_brute,
56552+ .maxlen = sizeof(int),
56553+ .mode = 0600,
56554+ .proc_handler = &proc_dointvec,
56555+ },
56556+#endif
56557+#ifdef CONFIG_GRKERNSEC_FIFO
56558+ {
56559+ .procname = "fifo_restrictions",
56560+ .data = &grsec_enable_fifo,
56561+ .maxlen = sizeof(int),
56562+ .mode = 0600,
56563+ .proc_handler = &proc_dointvec,
56564+ },
56565+#endif
56566+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
56567+ {
56568+ .procname = "ptrace_readexec",
56569+ .data = &grsec_enable_ptrace_readexec,
56570+ .maxlen = sizeof(int),
56571+ .mode = 0600,
56572+ .proc_handler = &proc_dointvec,
56573+ },
56574+#endif
56575+#ifdef CONFIG_GRKERNSEC_SETXID
56576+ {
56577+ .procname = "consistent_setxid",
56578+ .data = &grsec_enable_setxid,
56579+ .maxlen = sizeof(int),
56580+ .mode = 0600,
56581+ .proc_handler = &proc_dointvec,
56582+ },
56583+#endif
56584+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
56585+ {
56586+ .procname = "ip_blackhole",
56587+ .data = &grsec_enable_blackhole,
56588+ .maxlen = sizeof(int),
56589+ .mode = 0600,
56590+ .proc_handler = &proc_dointvec,
56591+ },
56592+ {
56593+ .procname = "lastack_retries",
56594+ .data = &grsec_lastack_retries,
56595+ .maxlen = sizeof(int),
56596+ .mode = 0600,
56597+ .proc_handler = &proc_dointvec,
56598+ },
56599+#endif
56600+#ifdef CONFIG_GRKERNSEC_EXECLOG
56601+ {
56602+ .procname = "exec_logging",
56603+ .data = &grsec_enable_execlog,
56604+ .maxlen = sizeof(int),
56605+ .mode = 0600,
56606+ .proc_handler = &proc_dointvec,
56607+ },
56608+#endif
56609+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56610+ {
56611+ .procname = "rwxmap_logging",
56612+ .data = &grsec_enable_log_rwxmaps,
56613+ .maxlen = sizeof(int),
56614+ .mode = 0600,
56615+ .proc_handler = &proc_dointvec,
56616+ },
56617+#endif
56618+#ifdef CONFIG_GRKERNSEC_SIGNAL
56619+ {
56620+ .procname = "signal_logging",
56621+ .data = &grsec_enable_signal,
56622+ .maxlen = sizeof(int),
56623+ .mode = 0600,
56624+ .proc_handler = &proc_dointvec,
56625+ },
56626+#endif
56627+#ifdef CONFIG_GRKERNSEC_FORKFAIL
56628+ {
56629+ .procname = "forkfail_logging",
56630+ .data = &grsec_enable_forkfail,
56631+ .maxlen = sizeof(int),
56632+ .mode = 0600,
56633+ .proc_handler = &proc_dointvec,
56634+ },
56635+#endif
56636+#ifdef CONFIG_GRKERNSEC_TIME
56637+ {
56638+ .procname = "timechange_logging",
56639+ .data = &grsec_enable_time,
56640+ .maxlen = sizeof(int),
56641+ .mode = 0600,
56642+ .proc_handler = &proc_dointvec,
56643+ },
56644+#endif
56645+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56646+ {
56647+ .procname = "chroot_deny_shmat",
56648+ .data = &grsec_enable_chroot_shmat,
56649+ .maxlen = sizeof(int),
56650+ .mode = 0600,
56651+ .proc_handler = &proc_dointvec,
56652+ },
56653+#endif
56654+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56655+ {
56656+ .procname = "chroot_deny_unix",
56657+ .data = &grsec_enable_chroot_unix,
56658+ .maxlen = sizeof(int),
56659+ .mode = 0600,
56660+ .proc_handler = &proc_dointvec,
56661+ },
56662+#endif
56663+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56664+ {
56665+ .procname = "chroot_deny_mount",
56666+ .data = &grsec_enable_chroot_mount,
56667+ .maxlen = sizeof(int),
56668+ .mode = 0600,
56669+ .proc_handler = &proc_dointvec,
56670+ },
56671+#endif
56672+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56673+ {
56674+ .procname = "chroot_deny_fchdir",
56675+ .data = &grsec_enable_chroot_fchdir,
56676+ .maxlen = sizeof(int),
56677+ .mode = 0600,
56678+ .proc_handler = &proc_dointvec,
56679+ },
56680+#endif
56681+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56682+ {
56683+ .procname = "chroot_deny_chroot",
56684+ .data = &grsec_enable_chroot_double,
56685+ .maxlen = sizeof(int),
56686+ .mode = 0600,
56687+ .proc_handler = &proc_dointvec,
56688+ },
56689+#endif
56690+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56691+ {
56692+ .procname = "chroot_deny_pivot",
56693+ .data = &grsec_enable_chroot_pivot,
56694+ .maxlen = sizeof(int),
56695+ .mode = 0600,
56696+ .proc_handler = &proc_dointvec,
56697+ },
56698+#endif
56699+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56700+ {
56701+ .procname = "chroot_enforce_chdir",
56702+ .data = &grsec_enable_chroot_chdir,
56703+ .maxlen = sizeof(int),
56704+ .mode = 0600,
56705+ .proc_handler = &proc_dointvec,
56706+ },
56707+#endif
56708+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56709+ {
56710+ .procname = "chroot_deny_chmod",
56711+ .data = &grsec_enable_chroot_chmod,
56712+ .maxlen = sizeof(int),
56713+ .mode = 0600,
56714+ .proc_handler = &proc_dointvec,
56715+ },
56716+#endif
56717+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56718+ {
56719+ .procname = "chroot_deny_mknod",
56720+ .data = &grsec_enable_chroot_mknod,
56721+ .maxlen = sizeof(int),
56722+ .mode = 0600,
56723+ .proc_handler = &proc_dointvec,
56724+ },
56725+#endif
56726+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56727+ {
56728+ .procname = "chroot_restrict_nice",
56729+ .data = &grsec_enable_chroot_nice,
56730+ .maxlen = sizeof(int),
56731+ .mode = 0600,
56732+ .proc_handler = &proc_dointvec,
56733+ },
56734+#endif
56735+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56736+ {
56737+ .procname = "chroot_execlog",
56738+ .data = &grsec_enable_chroot_execlog,
56739+ .maxlen = sizeof(int),
56740+ .mode = 0600,
56741+ .proc_handler = &proc_dointvec,
56742+ },
56743+#endif
56744+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56745+ {
56746+ .procname = "chroot_caps",
56747+ .data = &grsec_enable_chroot_caps,
56748+ .maxlen = sizeof(int),
56749+ .mode = 0600,
56750+ .proc_handler = &proc_dointvec,
56751+ },
56752+#endif
56753+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56754+ {
56755+ .procname = "chroot_deny_sysctl",
56756+ .data = &grsec_enable_chroot_sysctl,
56757+ .maxlen = sizeof(int),
56758+ .mode = 0600,
56759+ .proc_handler = &proc_dointvec,
56760+ },
56761+#endif
56762+#ifdef CONFIG_GRKERNSEC_TPE
56763+ {
56764+ .procname = "tpe",
56765+ .data = &grsec_enable_tpe,
56766+ .maxlen = sizeof(int),
56767+ .mode = 0600,
56768+ .proc_handler = &proc_dointvec,
56769+ },
56770+ {
56771+ .procname = "tpe_gid",
56772+ .data = &grsec_tpe_gid,
56773+ .maxlen = sizeof(int),
56774+ .mode = 0600,
56775+ .proc_handler = &proc_dointvec,
56776+ },
56777+#endif
56778+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
56779+ {
56780+ .procname = "tpe_invert",
56781+ .data = &grsec_enable_tpe_invert,
56782+ .maxlen = sizeof(int),
56783+ .mode = 0600,
56784+ .proc_handler = &proc_dointvec,
56785+ },
56786+#endif
56787+#ifdef CONFIG_GRKERNSEC_TPE_ALL
56788+ {
56789+ .procname = "tpe_restrict_all",
56790+ .data = &grsec_enable_tpe_all,
56791+ .maxlen = sizeof(int),
56792+ .mode = 0600,
56793+ .proc_handler = &proc_dointvec,
56794+ },
56795+#endif
56796+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
56797+ {
56798+ .procname = "socket_all",
56799+ .data = &grsec_enable_socket_all,
56800+ .maxlen = sizeof(int),
56801+ .mode = 0600,
56802+ .proc_handler = &proc_dointvec,
56803+ },
56804+ {
56805+ .procname = "socket_all_gid",
56806+ .data = &grsec_socket_all_gid,
56807+ .maxlen = sizeof(int),
56808+ .mode = 0600,
56809+ .proc_handler = &proc_dointvec,
56810+ },
56811+#endif
56812+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
56813+ {
56814+ .procname = "socket_client",
56815+ .data = &grsec_enable_socket_client,
56816+ .maxlen = sizeof(int),
56817+ .mode = 0600,
56818+ .proc_handler = &proc_dointvec,
56819+ },
56820+ {
56821+ .procname = "socket_client_gid",
56822+ .data = &grsec_socket_client_gid,
56823+ .maxlen = sizeof(int),
56824+ .mode = 0600,
56825+ .proc_handler = &proc_dointvec,
56826+ },
56827+#endif
56828+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
56829+ {
56830+ .procname = "socket_server",
56831+ .data = &grsec_enable_socket_server,
56832+ .maxlen = sizeof(int),
56833+ .mode = 0600,
56834+ .proc_handler = &proc_dointvec,
56835+ },
56836+ {
56837+ .procname = "socket_server_gid",
56838+ .data = &grsec_socket_server_gid,
56839+ .maxlen = sizeof(int),
56840+ .mode = 0600,
56841+ .proc_handler = &proc_dointvec,
56842+ },
56843+#endif
56844+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
56845+ {
56846+ .procname = "audit_group",
56847+ .data = &grsec_enable_group,
56848+ .maxlen = sizeof(int),
56849+ .mode = 0600,
56850+ .proc_handler = &proc_dointvec,
56851+ },
56852+ {
56853+ .procname = "audit_gid",
56854+ .data = &grsec_audit_gid,
56855+ .maxlen = sizeof(int),
56856+ .mode = 0600,
56857+ .proc_handler = &proc_dointvec,
56858+ },
56859+#endif
56860+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56861+ {
56862+ .procname = "audit_chdir",
56863+ .data = &grsec_enable_chdir,
56864+ .maxlen = sizeof(int),
56865+ .mode = 0600,
56866+ .proc_handler = &proc_dointvec,
56867+ },
56868+#endif
56869+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
56870+ {
56871+ .procname = "audit_mount",
56872+ .data = &grsec_enable_mount,
56873+ .maxlen = sizeof(int),
56874+ .mode = 0600,
56875+ .proc_handler = &proc_dointvec,
56876+ },
56877+#endif
56878+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
56879+ {
56880+ .procname = "audit_textrel",
56881+ .data = &grsec_enable_audit_textrel,
56882+ .maxlen = sizeof(int),
56883+ .mode = 0600,
56884+ .proc_handler = &proc_dointvec,
56885+ },
56886+#endif
56887+#ifdef CONFIG_GRKERNSEC_DMESG
56888+ {
56889+ .procname = "dmesg",
56890+ .data = &grsec_enable_dmesg,
56891+ .maxlen = sizeof(int),
56892+ .mode = 0600,
56893+ .proc_handler = &proc_dointvec,
56894+ },
56895+#endif
56896+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56897+ {
56898+ .procname = "chroot_findtask",
56899+ .data = &grsec_enable_chroot_findtask,
56900+ .maxlen = sizeof(int),
56901+ .mode = 0600,
56902+ .proc_handler = &proc_dointvec,
56903+ },
56904+#endif
56905+#ifdef CONFIG_GRKERNSEC_RESLOG
56906+ {
56907+ .procname = "resource_logging",
56908+ .data = &grsec_resource_logging,
56909+ .maxlen = sizeof(int),
56910+ .mode = 0600,
56911+ .proc_handler = &proc_dointvec,
56912+ },
56913+#endif
56914+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
56915+ {
56916+ .procname = "audit_ptrace",
56917+ .data = &grsec_enable_audit_ptrace,
56918+ .maxlen = sizeof(int),
56919+ .mode = 0600,
56920+ .proc_handler = &proc_dointvec,
56921+ },
56922+#endif
56923+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
56924+ {
56925+ .procname = "harden_ptrace",
56926+ .data = &grsec_enable_harden_ptrace,
56927+ .maxlen = sizeof(int),
56928+ .mode = 0600,
56929+ .proc_handler = &proc_dointvec,
56930+ },
56931+#endif
56932+ {
56933+ .procname = "grsec_lock",
56934+ .data = &grsec_lock,
56935+ .maxlen = sizeof(int),
56936+ .mode = 0600,
56937+ .proc_handler = &proc_dointvec,
56938+ },
56939+#endif
56940+#ifdef CONFIG_GRKERNSEC_ROFS
56941+ {
56942+ .procname = "romount_protect",
56943+ .data = &grsec_enable_rofs,
56944+ .maxlen = sizeof(int),
56945+ .mode = 0600,
56946+ .proc_handler = &proc_dointvec_minmax,
56947+ .extra1 = &one,
56948+ .extra2 = &one,
56949+ },
56950+#endif
56951+ { }
56952+};
56953+#endif
56954diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
56955new file mode 100644
56956index 0000000..0dc13c3
56957--- /dev/null
56958+++ b/grsecurity/grsec_time.c
56959@@ -0,0 +1,16 @@
56960+#include <linux/kernel.h>
56961+#include <linux/sched.h>
56962+#include <linux/grinternal.h>
56963+#include <linux/module.h>
56964+
56965+void
56966+gr_log_timechange(void)
56967+{
56968+#ifdef CONFIG_GRKERNSEC_TIME
56969+ if (grsec_enable_time)
56970+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
56971+#endif
56972+ return;
56973+}
56974+
56975+EXPORT_SYMBOL(gr_log_timechange);
56976diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
56977new file mode 100644
56978index 0000000..a35ba33
56979--- /dev/null
56980+++ b/grsecurity/grsec_tpe.c
56981@@ -0,0 +1,73 @@
56982+#include <linux/kernel.h>
56983+#include <linux/sched.h>
56984+#include <linux/file.h>
56985+#include <linux/fs.h>
56986+#include <linux/grinternal.h>
56987+
56988+extern int gr_acl_tpe_check(void);
56989+
56990+int
56991+gr_tpe_allow(const struct file *file)
56992+{
56993+#ifdef CONFIG_GRKERNSEC
56994+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
56995+ const struct cred *cred = current_cred();
56996+ char *msg = NULL;
56997+ char *msg2 = NULL;
56998+
56999+ // never restrict root
57000+ if (!cred->uid)
57001+ return 1;
57002+
57003+ if (grsec_enable_tpe) {
57004+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57005+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
57006+ msg = "not being in trusted group";
57007+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
57008+ msg = "being in untrusted group";
57009+#else
57010+ if (in_group_p(grsec_tpe_gid))
57011+ msg = "being in untrusted group";
57012+#endif
57013+ }
57014+ if (!msg && gr_acl_tpe_check())
57015+ msg = "being in untrusted role";
57016+
57017+ // not in any affected group/role
57018+ if (!msg)
57019+ goto next_check;
57020+
57021+ if (inode->i_uid)
57022+ msg2 = "file in non-root-owned directory";
57023+ else if (inode->i_mode & S_IWOTH)
57024+ msg2 = "file in world-writable directory";
57025+ else if (inode->i_mode & S_IWGRP)
57026+ msg2 = "file in group-writable directory";
57027+
57028+ if (msg && msg2) {
57029+ char fullmsg[64] = {0};
57030+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
57031+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
57032+ return 0;
57033+ }
57034+ msg = NULL;
57035+next_check:
57036+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57037+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
57038+ return 1;
57039+
57040+ if (inode->i_uid && (inode->i_uid != cred->uid))
57041+ msg = "directory not owned by user";
57042+ else if (inode->i_mode & S_IWOTH)
57043+ msg = "file in world-writable directory";
57044+ else if (inode->i_mode & S_IWGRP)
57045+ msg = "file in group-writable directory";
57046+
57047+ if (msg) {
57048+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
57049+ return 0;
57050+ }
57051+#endif
57052+#endif
57053+ return 1;
57054+}
57055diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
57056new file mode 100644
57057index 0000000..9f7b1ac
57058--- /dev/null
57059+++ b/grsecurity/grsum.c
57060@@ -0,0 +1,61 @@
57061+#include <linux/err.h>
57062+#include <linux/kernel.h>
57063+#include <linux/sched.h>
57064+#include <linux/mm.h>
57065+#include <linux/scatterlist.h>
57066+#include <linux/crypto.h>
57067+#include <linux/gracl.h>
57068+
57069+
57070+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
57071+#error "crypto and sha256 must be built into the kernel"
57072+#endif
57073+
57074+int
57075+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
57076+{
57077+ char *p;
57078+ struct crypto_hash *tfm;
57079+ struct hash_desc desc;
57080+ struct scatterlist sg;
57081+ unsigned char temp_sum[GR_SHA_LEN];
57082+ volatile int retval = 0;
57083+ volatile int dummy = 0;
57084+ unsigned int i;
57085+
57086+ sg_init_table(&sg, 1);
57087+
57088+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
57089+ if (IS_ERR(tfm)) {
57090+ /* should never happen, since sha256 should be built in */
57091+ return 1;
57092+ }
57093+
57094+ desc.tfm = tfm;
57095+ desc.flags = 0;
57096+
57097+ crypto_hash_init(&desc);
57098+
57099+ p = salt;
57100+ sg_set_buf(&sg, p, GR_SALT_LEN);
57101+ crypto_hash_update(&desc, &sg, sg.length);
57102+
57103+ p = entry->pw;
57104+ sg_set_buf(&sg, p, strlen(p));
57105+
57106+ crypto_hash_update(&desc, &sg, sg.length);
57107+
57108+ crypto_hash_final(&desc, temp_sum);
57109+
57110+ memset(entry->pw, 0, GR_PW_LEN);
57111+
57112+ for (i = 0; i < GR_SHA_LEN; i++)
57113+ if (sum[i] != temp_sum[i])
57114+ retval = 1;
57115+ else
57116+ dummy = 1; // waste a cycle
57117+
57118+ crypto_free_hash(tfm);
57119+
57120+ return retval;
57121+}
57122diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
57123index 6cd5b64..f620d2d 100644
57124--- a/include/acpi/acpi_bus.h
57125+++ b/include/acpi/acpi_bus.h
57126@@ -107,7 +107,7 @@ struct acpi_device_ops {
57127 acpi_op_bind bind;
57128 acpi_op_unbind unbind;
57129 acpi_op_notify notify;
57130-};
57131+} __no_const;
57132
57133 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57134
57135diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
57136index b7babf0..71e4e74 100644
57137--- a/include/asm-generic/atomic-long.h
57138+++ b/include/asm-generic/atomic-long.h
57139@@ -22,6 +22,12 @@
57140
57141 typedef atomic64_t atomic_long_t;
57142
57143+#ifdef CONFIG_PAX_REFCOUNT
57144+typedef atomic64_unchecked_t atomic_long_unchecked_t;
57145+#else
57146+typedef atomic64_t atomic_long_unchecked_t;
57147+#endif
57148+
57149 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57150
57151 static inline long atomic_long_read(atomic_long_t *l)
57152@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57153 return (long)atomic64_read(v);
57154 }
57155
57156+#ifdef CONFIG_PAX_REFCOUNT
57157+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57158+{
57159+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57160+
57161+ return (long)atomic64_read_unchecked(v);
57162+}
57163+#endif
57164+
57165 static inline void atomic_long_set(atomic_long_t *l, long i)
57166 {
57167 atomic64_t *v = (atomic64_t *)l;
57168@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57169 atomic64_set(v, i);
57170 }
57171
57172+#ifdef CONFIG_PAX_REFCOUNT
57173+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57174+{
57175+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57176+
57177+ atomic64_set_unchecked(v, i);
57178+}
57179+#endif
57180+
57181 static inline void atomic_long_inc(atomic_long_t *l)
57182 {
57183 atomic64_t *v = (atomic64_t *)l;
57184@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57185 atomic64_inc(v);
57186 }
57187
57188+#ifdef CONFIG_PAX_REFCOUNT
57189+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57190+{
57191+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57192+
57193+ atomic64_inc_unchecked(v);
57194+}
57195+#endif
57196+
57197 static inline void atomic_long_dec(atomic_long_t *l)
57198 {
57199 atomic64_t *v = (atomic64_t *)l;
57200@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57201 atomic64_dec(v);
57202 }
57203
57204+#ifdef CONFIG_PAX_REFCOUNT
57205+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57206+{
57207+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57208+
57209+ atomic64_dec_unchecked(v);
57210+}
57211+#endif
57212+
57213 static inline void atomic_long_add(long i, atomic_long_t *l)
57214 {
57215 atomic64_t *v = (atomic64_t *)l;
57216@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57217 atomic64_add(i, v);
57218 }
57219
57220+#ifdef CONFIG_PAX_REFCOUNT
57221+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57222+{
57223+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57224+
57225+ atomic64_add_unchecked(i, v);
57226+}
57227+#endif
57228+
57229 static inline void atomic_long_sub(long i, atomic_long_t *l)
57230 {
57231 atomic64_t *v = (atomic64_t *)l;
57232@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57233 atomic64_sub(i, v);
57234 }
57235
57236+#ifdef CONFIG_PAX_REFCOUNT
57237+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57238+{
57239+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57240+
57241+ atomic64_sub_unchecked(i, v);
57242+}
57243+#endif
57244+
57245 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57246 {
57247 atomic64_t *v = (atomic64_t *)l;
57248@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57249 return (long)atomic64_inc_return(v);
57250 }
57251
57252+#ifdef CONFIG_PAX_REFCOUNT
57253+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57254+{
57255+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57256+
57257+ return (long)atomic64_inc_return_unchecked(v);
57258+}
57259+#endif
57260+
57261 static inline long atomic_long_dec_return(atomic_long_t *l)
57262 {
57263 atomic64_t *v = (atomic64_t *)l;
57264@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57265
57266 typedef atomic_t atomic_long_t;
57267
57268+#ifdef CONFIG_PAX_REFCOUNT
57269+typedef atomic_unchecked_t atomic_long_unchecked_t;
57270+#else
57271+typedef atomic_t atomic_long_unchecked_t;
57272+#endif
57273+
57274 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57275 static inline long atomic_long_read(atomic_long_t *l)
57276 {
57277@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
57278 return (long)atomic_read(v);
57279 }
57280
57281+#ifdef CONFIG_PAX_REFCOUNT
57282+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57283+{
57284+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57285+
57286+ return (long)atomic_read_unchecked(v);
57287+}
57288+#endif
57289+
57290 static inline void atomic_long_set(atomic_long_t *l, long i)
57291 {
57292 atomic_t *v = (atomic_t *)l;
57293@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
57294 atomic_set(v, i);
57295 }
57296
57297+#ifdef CONFIG_PAX_REFCOUNT
57298+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57299+{
57300+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57301+
57302+ atomic_set_unchecked(v, i);
57303+}
57304+#endif
57305+
57306 static inline void atomic_long_inc(atomic_long_t *l)
57307 {
57308 atomic_t *v = (atomic_t *)l;
57309@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
57310 atomic_inc(v);
57311 }
57312
57313+#ifdef CONFIG_PAX_REFCOUNT
57314+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57315+{
57316+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57317+
57318+ atomic_inc_unchecked(v);
57319+}
57320+#endif
57321+
57322 static inline void atomic_long_dec(atomic_long_t *l)
57323 {
57324 atomic_t *v = (atomic_t *)l;
57325@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
57326 atomic_dec(v);
57327 }
57328
57329+#ifdef CONFIG_PAX_REFCOUNT
57330+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57331+{
57332+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57333+
57334+ atomic_dec_unchecked(v);
57335+}
57336+#endif
57337+
57338 static inline void atomic_long_add(long i, atomic_long_t *l)
57339 {
57340 atomic_t *v = (atomic_t *)l;
57341@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
57342 atomic_add(i, v);
57343 }
57344
57345+#ifdef CONFIG_PAX_REFCOUNT
57346+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57347+{
57348+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57349+
57350+ atomic_add_unchecked(i, v);
57351+}
57352+#endif
57353+
57354 static inline void atomic_long_sub(long i, atomic_long_t *l)
57355 {
57356 atomic_t *v = (atomic_t *)l;
57357@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
57358 atomic_sub(i, v);
57359 }
57360
57361+#ifdef CONFIG_PAX_REFCOUNT
57362+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
57363+{
57364+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57365+
57366+ atomic_sub_unchecked(i, v);
57367+}
57368+#endif
57369+
57370 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
57371 {
57372 atomic_t *v = (atomic_t *)l;
57373@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
57374 return (long)atomic_inc_return(v);
57375 }
57376
57377+#ifdef CONFIG_PAX_REFCOUNT
57378+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57379+{
57380+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57381+
57382+ return (long)atomic_inc_return_unchecked(v);
57383+}
57384+#endif
57385+
57386 static inline long atomic_long_dec_return(atomic_long_t *l)
57387 {
57388 atomic_t *v = (atomic_t *)l;
57389@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
57390
57391 #endif /* BITS_PER_LONG == 64 */
57392
57393+#ifdef CONFIG_PAX_REFCOUNT
57394+static inline void pax_refcount_needs_these_functions(void)
57395+{
57396+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57397+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57398+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57399+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57400+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57401+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57402+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57403+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57404+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57405+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57406+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57407+
57408+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57409+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57410+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57411+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
57412+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57413+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57414+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57415+}
57416+#else
57417+#define atomic_read_unchecked(v) atomic_read(v)
57418+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57419+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57420+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57421+#define atomic_inc_unchecked(v) atomic_inc(v)
57422+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57423+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57424+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57425+#define atomic_dec_unchecked(v) atomic_dec(v)
57426+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57427+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57428+
57429+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57430+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57431+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57432+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
57433+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57434+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57435+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57436+#endif
57437+
57438 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
57439diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
57440index b18ce4f..2ee2843 100644
57441--- a/include/asm-generic/atomic64.h
57442+++ b/include/asm-generic/atomic64.h
57443@@ -16,6 +16,8 @@ typedef struct {
57444 long long counter;
57445 } atomic64_t;
57446
57447+typedef atomic64_t atomic64_unchecked_t;
57448+
57449 #define ATOMIC64_INIT(i) { (i) }
57450
57451 extern long long atomic64_read(const atomic64_t *v);
57452@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
57453 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
57454 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
57455
57456+#define atomic64_read_unchecked(v) atomic64_read(v)
57457+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
57458+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
57459+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
57460+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
57461+#define atomic64_inc_unchecked(v) atomic64_inc(v)
57462+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
57463+#define atomic64_dec_unchecked(v) atomic64_dec(v)
57464+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
57465+
57466 #endif /* _ASM_GENERIC_ATOMIC64_H */
57467diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
57468index 1bfcfe5..e04c5c9 100644
57469--- a/include/asm-generic/cache.h
57470+++ b/include/asm-generic/cache.h
57471@@ -6,7 +6,7 @@
57472 * cache lines need to provide their own cache.h.
57473 */
57474
57475-#define L1_CACHE_SHIFT 5
57476-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57477+#define L1_CACHE_SHIFT 5UL
57478+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57479
57480 #endif /* __ASM_GENERIC_CACHE_H */
57481diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
57482index 1ca3efc..e3dc852 100644
57483--- a/include/asm-generic/int-l64.h
57484+++ b/include/asm-generic/int-l64.h
57485@@ -46,6 +46,8 @@ typedef unsigned int u32;
57486 typedef signed long s64;
57487 typedef unsigned long u64;
57488
57489+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57490+
57491 #define S8_C(x) x
57492 #define U8_C(x) x ## U
57493 #define S16_C(x) x
57494diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
57495index f394147..b6152b9 100644
57496--- a/include/asm-generic/int-ll64.h
57497+++ b/include/asm-generic/int-ll64.h
57498@@ -51,6 +51,8 @@ typedef unsigned int u32;
57499 typedef signed long long s64;
57500 typedef unsigned long long u64;
57501
57502+typedef unsigned long long intoverflow_t;
57503+
57504 #define S8_C(x) x
57505 #define U8_C(x) x ## U
57506 #define S16_C(x) x
57507diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
57508index 0232ccb..13d9165 100644
57509--- a/include/asm-generic/kmap_types.h
57510+++ b/include/asm-generic/kmap_types.h
57511@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
57512 KMAP_D(17) KM_NMI,
57513 KMAP_D(18) KM_NMI_PTE,
57514 KMAP_D(19) KM_KDB,
57515+KMAP_D(20) KM_CLEARPAGE,
57516 /*
57517 * Remember to update debug_kmap_atomic() when adding new kmap types!
57518 */
57519-KMAP_D(20) KM_TYPE_NR
57520+KMAP_D(21) KM_TYPE_NR
57521 };
57522
57523 #undef KMAP_D
57524diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
57525index 725612b..9cc513a 100644
57526--- a/include/asm-generic/pgtable-nopmd.h
57527+++ b/include/asm-generic/pgtable-nopmd.h
57528@@ -1,14 +1,19 @@
57529 #ifndef _PGTABLE_NOPMD_H
57530 #define _PGTABLE_NOPMD_H
57531
57532-#ifndef __ASSEMBLY__
57533-
57534 #include <asm-generic/pgtable-nopud.h>
57535
57536-struct mm_struct;
57537-
57538 #define __PAGETABLE_PMD_FOLDED
57539
57540+#define PMD_SHIFT PUD_SHIFT
57541+#define PTRS_PER_PMD 1
57542+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57543+#define PMD_MASK (~(PMD_SIZE-1))
57544+
57545+#ifndef __ASSEMBLY__
57546+
57547+struct mm_struct;
57548+
57549 /*
57550 * Having the pmd type consist of a pud gets the size right, and allows
57551 * us to conceptually access the pud entry that this pmd is folded into
57552@@ -16,11 +21,6 @@ struct mm_struct;
57553 */
57554 typedef struct { pud_t pud; } pmd_t;
57555
57556-#define PMD_SHIFT PUD_SHIFT
57557-#define PTRS_PER_PMD 1
57558-#define PMD_SIZE (1UL << PMD_SHIFT)
57559-#define PMD_MASK (~(PMD_SIZE-1))
57560-
57561 /*
57562 * The "pud_xxx()" functions here are trivial for a folded two-level
57563 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57564diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
57565index 810431d..ccc3638 100644
57566--- a/include/asm-generic/pgtable-nopud.h
57567+++ b/include/asm-generic/pgtable-nopud.h
57568@@ -1,10 +1,15 @@
57569 #ifndef _PGTABLE_NOPUD_H
57570 #define _PGTABLE_NOPUD_H
57571
57572-#ifndef __ASSEMBLY__
57573-
57574 #define __PAGETABLE_PUD_FOLDED
57575
57576+#define PUD_SHIFT PGDIR_SHIFT
57577+#define PTRS_PER_PUD 1
57578+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57579+#define PUD_MASK (~(PUD_SIZE-1))
57580+
57581+#ifndef __ASSEMBLY__
57582+
57583 /*
57584 * Having the pud type consist of a pgd gets the size right, and allows
57585 * us to conceptually access the pgd entry that this pud is folded into
57586@@ -12,11 +17,6 @@
57587 */
57588 typedef struct { pgd_t pgd; } pud_t;
57589
57590-#define PUD_SHIFT PGDIR_SHIFT
57591-#define PTRS_PER_PUD 1
57592-#define PUD_SIZE (1UL << PUD_SHIFT)
57593-#define PUD_MASK (~(PUD_SIZE-1))
57594-
57595 /*
57596 * The "pgd_xxx()" functions here are trivial for a folded two-level
57597 * setup: the pud is never bad, and a pud always exists (as it's folded
57598diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
57599index 76bff2b..c7a14e2 100644
57600--- a/include/asm-generic/pgtable.h
57601+++ b/include/asm-generic/pgtable.h
57602@@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
57603 #endif /* __HAVE_ARCH_PMD_WRITE */
57604 #endif
57605
57606+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57607+static inline unsigned long pax_open_kernel(void) { return 0; }
57608+#endif
57609+
57610+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57611+static inline unsigned long pax_close_kernel(void) { return 0; }
57612+#endif
57613+
57614 #endif /* !__ASSEMBLY__ */
57615
57616 #endif /* _ASM_GENERIC_PGTABLE_H */
57617diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
57618index b5e2e4c..6a5373e 100644
57619--- a/include/asm-generic/vmlinux.lds.h
57620+++ b/include/asm-generic/vmlinux.lds.h
57621@@ -217,6 +217,7 @@
57622 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57623 VMLINUX_SYMBOL(__start_rodata) = .; \
57624 *(.rodata) *(.rodata.*) \
57625+ *(.data..read_only) \
57626 *(__vermagic) /* Kernel version magic */ \
57627 . = ALIGN(8); \
57628 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
57629@@ -722,17 +723,18 @@
57630 * section in the linker script will go there too. @phdr should have
57631 * a leading colon.
57632 *
57633- * Note that this macros defines __per_cpu_load as an absolute symbol.
57634+ * Note that this macros defines per_cpu_load as an absolute symbol.
57635 * If there is no need to put the percpu section at a predetermined
57636 * address, use PERCPU_SECTION.
57637 */
57638 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
57639- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57640- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57641+ per_cpu_load = .; \
57642+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57643 - LOAD_OFFSET) { \
57644+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57645 PERCPU_INPUT(cacheline) \
57646 } phdr \
57647- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
57648+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
57649
57650 /**
57651 * PERCPU_SECTION - define output section for percpu area, simple version
57652diff --git a/include/drm/drmP.h b/include/drm/drmP.h
57653index 1f9e951..14ef517 100644
57654--- a/include/drm/drmP.h
57655+++ b/include/drm/drmP.h
57656@@ -72,6 +72,7 @@
57657 #include <linux/workqueue.h>
57658 #include <linux/poll.h>
57659 #include <asm/pgalloc.h>
57660+#include <asm/local.h>
57661 #include "drm.h"
57662
57663 #include <linux/idr.h>
57664@@ -1038,7 +1039,7 @@ struct drm_device {
57665
57666 /** \name Usage Counters */
57667 /*@{ */
57668- int open_count; /**< Outstanding files open */
57669+ local_t open_count; /**< Outstanding files open */
57670 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57671 atomic_t vma_count; /**< Outstanding vma areas open */
57672 int buf_use; /**< Buffers in use -- cannot alloc */
57673@@ -1049,7 +1050,7 @@ struct drm_device {
57674 /*@{ */
57675 unsigned long counters;
57676 enum drm_stat_type types[15];
57677- atomic_t counts[15];
57678+ atomic_unchecked_t counts[15];
57679 /*@} */
57680
57681 struct list_head filelist;
57682diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
57683index 73b0712..0b7ef2f 100644
57684--- a/include/drm/drm_crtc_helper.h
57685+++ b/include/drm/drm_crtc_helper.h
57686@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
57687
57688 /* disable crtc when not in use - more explicit than dpms off */
57689 void (*disable)(struct drm_crtc *crtc);
57690-};
57691+} __no_const;
57692
57693 struct drm_encoder_helper_funcs {
57694 void (*dpms)(struct drm_encoder *encoder, int mode);
57695@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
57696 struct drm_connector *connector);
57697 /* disable encoder when not in use - more explicit than dpms off */
57698 void (*disable)(struct drm_encoder *encoder);
57699-};
57700+} __no_const;
57701
57702 struct drm_connector_helper_funcs {
57703 int (*get_modes)(struct drm_connector *connector);
57704diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
57705index 26c1f78..6722682 100644
57706--- a/include/drm/ttm/ttm_memory.h
57707+++ b/include/drm/ttm/ttm_memory.h
57708@@ -47,7 +47,7 @@
57709
57710 struct ttm_mem_shrink {
57711 int (*do_shrink) (struct ttm_mem_shrink *);
57712-};
57713+} __no_const;
57714
57715 /**
57716 * struct ttm_mem_global - Global memory accounting structure.
57717diff --git a/include/linux/a.out.h b/include/linux/a.out.h
57718index e86dfca..40cc55f 100644
57719--- a/include/linux/a.out.h
57720+++ b/include/linux/a.out.h
57721@@ -39,6 +39,14 @@ enum machine_type {
57722 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57723 };
57724
57725+/* Constants for the N_FLAGS field */
57726+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57727+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57728+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57729+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57730+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57731+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57732+
57733 #if !defined (N_MAGIC)
57734 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57735 #endif
57736diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
57737index 49a83ca..df96b54 100644
57738--- a/include/linux/atmdev.h
57739+++ b/include/linux/atmdev.h
57740@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57741 #endif
57742
57743 struct k_atm_aal_stats {
57744-#define __HANDLE_ITEM(i) atomic_t i
57745+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57746 __AAL_STAT_ITEMS
57747 #undef __HANDLE_ITEM
57748 };
57749diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
57750index fd88a39..f4d0bad 100644
57751--- a/include/linux/binfmts.h
57752+++ b/include/linux/binfmts.h
57753@@ -88,6 +88,7 @@ struct linux_binfmt {
57754 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57755 int (*load_shlib)(struct file *);
57756 int (*core_dump)(struct coredump_params *cprm);
57757+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57758 unsigned long min_coredump; /* minimal dump size */
57759 };
57760
57761diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
57762index 0ed1eb0..3ab569b 100644
57763--- a/include/linux/blkdev.h
57764+++ b/include/linux/blkdev.h
57765@@ -1315,7 +1315,7 @@ struct block_device_operations {
57766 /* this callback is with swap_lock and sometimes page table lock held */
57767 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
57768 struct module *owner;
57769-};
57770+} __do_const;
57771
57772 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57773 unsigned long);
57774diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
57775index 4d1a074..88f929a 100644
57776--- a/include/linux/blktrace_api.h
57777+++ b/include/linux/blktrace_api.h
57778@@ -162,7 +162,7 @@ struct blk_trace {
57779 struct dentry *dir;
57780 struct dentry *dropped_file;
57781 struct dentry *msg_file;
57782- atomic_t dropped;
57783+ atomic_unchecked_t dropped;
57784 };
57785
57786 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57787diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
57788index 83195fb..0b0f77d 100644
57789--- a/include/linux/byteorder/little_endian.h
57790+++ b/include/linux/byteorder/little_endian.h
57791@@ -42,51 +42,51 @@
57792
57793 static inline __le64 __cpu_to_le64p(const __u64 *p)
57794 {
57795- return (__force __le64)*p;
57796+ return (__force const __le64)*p;
57797 }
57798 static inline __u64 __le64_to_cpup(const __le64 *p)
57799 {
57800- return (__force __u64)*p;
57801+ return (__force const __u64)*p;
57802 }
57803 static inline __le32 __cpu_to_le32p(const __u32 *p)
57804 {
57805- return (__force __le32)*p;
57806+ return (__force const __le32)*p;
57807 }
57808 static inline __u32 __le32_to_cpup(const __le32 *p)
57809 {
57810- return (__force __u32)*p;
57811+ return (__force const __u32)*p;
57812 }
57813 static inline __le16 __cpu_to_le16p(const __u16 *p)
57814 {
57815- return (__force __le16)*p;
57816+ return (__force const __le16)*p;
57817 }
57818 static inline __u16 __le16_to_cpup(const __le16 *p)
57819 {
57820- return (__force __u16)*p;
57821+ return (__force const __u16)*p;
57822 }
57823 static inline __be64 __cpu_to_be64p(const __u64 *p)
57824 {
57825- return (__force __be64)__swab64p(p);
57826+ return (__force const __be64)__swab64p(p);
57827 }
57828 static inline __u64 __be64_to_cpup(const __be64 *p)
57829 {
57830- return __swab64p((__u64 *)p);
57831+ return __swab64p((const __u64 *)p);
57832 }
57833 static inline __be32 __cpu_to_be32p(const __u32 *p)
57834 {
57835- return (__force __be32)__swab32p(p);
57836+ return (__force const __be32)__swab32p(p);
57837 }
57838 static inline __u32 __be32_to_cpup(const __be32 *p)
57839 {
57840- return __swab32p((__u32 *)p);
57841+ return __swab32p((const __u32 *)p);
57842 }
57843 static inline __be16 __cpu_to_be16p(const __u16 *p)
57844 {
57845- return (__force __be16)__swab16p(p);
57846+ return (__force const __be16)__swab16p(p);
57847 }
57848 static inline __u16 __be16_to_cpup(const __be16 *p)
57849 {
57850- return __swab16p((__u16 *)p);
57851+ return __swab16p((const __u16 *)p);
57852 }
57853 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
57854 #define __le64_to_cpus(x) do { (void)(x); } while (0)
57855diff --git a/include/linux/cache.h b/include/linux/cache.h
57856index 4c57065..4307975 100644
57857--- a/include/linux/cache.h
57858+++ b/include/linux/cache.h
57859@@ -16,6 +16,10 @@
57860 #define __read_mostly
57861 #endif
57862
57863+#ifndef __read_only
57864+#define __read_only __read_mostly
57865+#endif
57866+
57867 #ifndef ____cacheline_aligned
57868 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
57869 #endif
57870diff --git a/include/linux/capability.h b/include/linux/capability.h
57871index a63d13d..069bfd5 100644
57872--- a/include/linux/capability.h
57873+++ b/include/linux/capability.h
57874@@ -548,6 +548,9 @@ extern bool capable(int cap);
57875 extern bool ns_capable(struct user_namespace *ns, int cap);
57876 extern bool task_ns_capable(struct task_struct *t, int cap);
57877 extern bool nsown_capable(int cap);
57878+extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
57879+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
57880+extern bool capable_nolog(int cap);
57881
57882 /* audit system wants to get cap info from files as well */
57883 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
57884diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
57885index 04ffb2e..6799180 100644
57886--- a/include/linux/cleancache.h
57887+++ b/include/linux/cleancache.h
57888@@ -31,7 +31,7 @@ struct cleancache_ops {
57889 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
57890 void (*flush_inode)(int, struct cleancache_filekey);
57891 void (*flush_fs)(int);
57892-};
57893+} __no_const;
57894
57895 extern struct cleancache_ops
57896 cleancache_register_ops(struct cleancache_ops *ops);
57897diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
57898index dfadc96..c0e70c1 100644
57899--- a/include/linux/compiler-gcc4.h
57900+++ b/include/linux/compiler-gcc4.h
57901@@ -31,6 +31,12 @@
57902
57903
57904 #if __GNUC_MINOR__ >= 5
57905+
57906+#ifdef CONSTIFY_PLUGIN
57907+#define __no_const __attribute__((no_const))
57908+#define __do_const __attribute__((do_const))
57909+#endif
57910+
57911 /*
57912 * Mark a position in code as unreachable. This can be used to
57913 * suppress control flow warnings after asm blocks that transfer
57914@@ -46,6 +52,11 @@
57915 #define __noclone __attribute__((__noclone__))
57916
57917 #endif
57918+
57919+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
57920+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
57921+#define __bos0(ptr) __bos((ptr), 0)
57922+#define __bos1(ptr) __bos((ptr), 1)
57923 #endif
57924
57925 #if __GNUC_MINOR__ > 0
57926diff --git a/include/linux/compiler.h b/include/linux/compiler.h
57927index 320d6c9..8573a1c 100644
57928--- a/include/linux/compiler.h
57929+++ b/include/linux/compiler.h
57930@@ -5,31 +5,62 @@
57931
57932 #ifdef __CHECKER__
57933 # define __user __attribute__((noderef, address_space(1)))
57934+# define __force_user __force __user
57935 # define __kernel __attribute__((address_space(0)))
57936+# define __force_kernel __force __kernel
57937 # define __safe __attribute__((safe))
57938 # define __force __attribute__((force))
57939 # define __nocast __attribute__((nocast))
57940 # define __iomem __attribute__((noderef, address_space(2)))
57941+# define __force_iomem __force __iomem
57942 # define __acquires(x) __attribute__((context(x,0,1)))
57943 # define __releases(x) __attribute__((context(x,1,0)))
57944 # define __acquire(x) __context__(x,1)
57945 # define __release(x) __context__(x,-1)
57946 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
57947 # define __percpu __attribute__((noderef, address_space(3)))
57948+# define __force_percpu __force __percpu
57949 #ifdef CONFIG_SPARSE_RCU_POINTER
57950 # define __rcu __attribute__((noderef, address_space(4)))
57951+# define __force_rcu __force __rcu
57952 #else
57953 # define __rcu
57954+# define __force_rcu
57955 #endif
57956 extern void __chk_user_ptr(const volatile void __user *);
57957 extern void __chk_io_ptr(const volatile void __iomem *);
57958+#elif defined(CHECKER_PLUGIN)
57959+//# define __user
57960+//# define __force_user
57961+//# define __kernel
57962+//# define __force_kernel
57963+# define __safe
57964+# define __force
57965+# define __nocast
57966+# define __iomem
57967+# define __force_iomem
57968+# define __chk_user_ptr(x) (void)0
57969+# define __chk_io_ptr(x) (void)0
57970+# define __builtin_warning(x, y...) (1)
57971+# define __acquires(x)
57972+# define __releases(x)
57973+# define __acquire(x) (void)0
57974+# define __release(x) (void)0
57975+# define __cond_lock(x,c) (c)
57976+# define __percpu
57977+# define __force_percpu
57978+# define __rcu
57979+# define __force_rcu
57980 #else
57981 # define __user
57982+# define __force_user
57983 # define __kernel
57984+# define __force_kernel
57985 # define __safe
57986 # define __force
57987 # define __nocast
57988 # define __iomem
57989+# define __force_iomem
57990 # define __chk_user_ptr(x) (void)0
57991 # define __chk_io_ptr(x) (void)0
57992 # define __builtin_warning(x, y...) (1)
57993@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
57994 # define __release(x) (void)0
57995 # define __cond_lock(x,c) (c)
57996 # define __percpu
57997+# define __force_percpu
57998 # define __rcu
57999+# define __force_rcu
58000 #endif
58001
58002 #ifdef __KERNEL__
58003@@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58004 # define __attribute_const__ /* unimplemented */
58005 #endif
58006
58007+#ifndef __no_const
58008+# define __no_const
58009+#endif
58010+
58011+#ifndef __do_const
58012+# define __do_const
58013+#endif
58014+
58015 /*
58016 * Tell gcc if a function is cold. The compiler will assume any path
58017 * directly leading to the call is unlikely.
58018@@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58019 #define __cold
58020 #endif
58021
58022+#ifndef __alloc_size
58023+#define __alloc_size(...)
58024+#endif
58025+
58026+#ifndef __bos
58027+#define __bos(ptr, arg)
58028+#endif
58029+
58030+#ifndef __bos0
58031+#define __bos0(ptr)
58032+#endif
58033+
58034+#ifndef __bos1
58035+#define __bos1(ptr)
58036+#endif
58037+
58038 /* Simple shorthand for a section definition */
58039 #ifndef __section
58040 # define __section(S) __attribute__ ((__section__(#S)))
58041@@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
58042 * use is to mediate communication between process-level code and irq/NMI
58043 * handlers, all running on the same CPU.
58044 */
58045-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58046+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58047+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58048
58049 #endif /* __LINUX_COMPILER_H */
58050diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
58051index e9eaec5..bfeb9bb 100644
58052--- a/include/linux/cpuset.h
58053+++ b/include/linux/cpuset.h
58054@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
58055 * nodemask.
58056 */
58057 smp_mb();
58058- --ACCESS_ONCE(current->mems_allowed_change_disable);
58059+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
58060 }
58061
58062 static inline void set_mems_allowed(nodemask_t nodemask)
58063diff --git a/include/linux/cred.h b/include/linux/cred.h
58064index 4030896..8d6f342 100644
58065--- a/include/linux/cred.h
58066+++ b/include/linux/cred.h
58067@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
58068 static inline void validate_process_creds(void)
58069 {
58070 }
58071+static inline void validate_task_creds(struct task_struct *task)
58072+{
58073+}
58074 #endif
58075
58076 /**
58077diff --git a/include/linux/crypto.h b/include/linux/crypto.h
58078index 8a94217..15d49e3 100644
58079--- a/include/linux/crypto.h
58080+++ b/include/linux/crypto.h
58081@@ -365,7 +365,7 @@ struct cipher_tfm {
58082 const u8 *key, unsigned int keylen);
58083 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58084 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58085-};
58086+} __no_const;
58087
58088 struct hash_tfm {
58089 int (*init)(struct hash_desc *desc);
58090@@ -386,13 +386,13 @@ struct compress_tfm {
58091 int (*cot_decompress)(struct crypto_tfm *tfm,
58092 const u8 *src, unsigned int slen,
58093 u8 *dst, unsigned int *dlen);
58094-};
58095+} __no_const;
58096
58097 struct rng_tfm {
58098 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58099 unsigned int dlen);
58100 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58101-};
58102+} __no_const;
58103
58104 #define crt_ablkcipher crt_u.ablkcipher
58105 #define crt_aead crt_u.aead
58106diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
58107index 7925bf0..d5143d2 100644
58108--- a/include/linux/decompress/mm.h
58109+++ b/include/linux/decompress/mm.h
58110@@ -77,7 +77,7 @@ static void free(void *where)
58111 * warnings when not needed (indeed large_malloc / large_free are not
58112 * needed by inflate */
58113
58114-#define malloc(a) kmalloc(a, GFP_KERNEL)
58115+#define malloc(a) kmalloc((a), GFP_KERNEL)
58116 #define free(a) kfree(a)
58117
58118 #define large_malloc(a) vmalloc(a)
58119diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
58120index e13117c..e9fc938 100644
58121--- a/include/linux/dma-mapping.h
58122+++ b/include/linux/dma-mapping.h
58123@@ -46,7 +46,7 @@ struct dma_map_ops {
58124 u64 (*get_required_mask)(struct device *dev);
58125 #endif
58126 int is_phys;
58127-};
58128+} __do_const;
58129
58130 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58131
58132diff --git a/include/linux/efi.h b/include/linux/efi.h
58133index 2362a0b..cfaf8fcc 100644
58134--- a/include/linux/efi.h
58135+++ b/include/linux/efi.h
58136@@ -446,7 +446,7 @@ struct efivar_operations {
58137 efi_get_variable_t *get_variable;
58138 efi_get_next_variable_t *get_next_variable;
58139 efi_set_variable_t *set_variable;
58140-};
58141+} __no_const;
58142
58143 struct efivars {
58144 /*
58145diff --git a/include/linux/elf.h b/include/linux/elf.h
58146index 31f0508..5421c01 100644
58147--- a/include/linux/elf.h
58148+++ b/include/linux/elf.h
58149@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58150 #define PT_GNU_EH_FRAME 0x6474e550
58151
58152 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58153+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58154+
58155+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58156+
58157+/* Constants for the e_flags field */
58158+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58159+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58160+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58161+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58162+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58163+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58164
58165 /*
58166 * Extended Numbering
58167@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
58168 #define DT_DEBUG 21
58169 #define DT_TEXTREL 22
58170 #define DT_JMPREL 23
58171+#define DT_FLAGS 30
58172+ #define DF_TEXTREL 0x00000004
58173 #define DT_ENCODING 32
58174 #define OLD_DT_LOOS 0x60000000
58175 #define DT_LOOS 0x6000000d
58176@@ -252,6 +265,19 @@ typedef struct elf64_hdr {
58177 #define PF_W 0x2
58178 #define PF_X 0x1
58179
58180+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58181+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58182+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58183+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58184+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58185+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58186+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58187+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58188+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58189+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58190+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58191+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58192+
58193 typedef struct elf32_phdr{
58194 Elf32_Word p_type;
58195 Elf32_Off p_offset;
58196@@ -344,6 +370,8 @@ typedef struct elf64_shdr {
58197 #define EI_OSABI 7
58198 #define EI_PAD 8
58199
58200+#define EI_PAX 14
58201+
58202 #define ELFMAG0 0x7f /* EI_MAG */
58203 #define ELFMAG1 'E'
58204 #define ELFMAG2 'L'
58205@@ -423,6 +451,7 @@ extern Elf32_Dyn _DYNAMIC [];
58206 #define elf_note elf32_note
58207 #define elf_addr_t Elf32_Off
58208 #define Elf_Half Elf32_Half
58209+#define elf_dyn Elf32_Dyn
58210
58211 #else
58212
58213@@ -433,6 +462,7 @@ extern Elf64_Dyn _DYNAMIC [];
58214 #define elf_note elf64_note
58215 #define elf_addr_t Elf64_Off
58216 #define Elf_Half Elf64_Half
58217+#define elf_dyn Elf64_Dyn
58218
58219 #endif
58220
58221diff --git a/include/linux/filter.h b/include/linux/filter.h
58222index 8eeb205..d59bfa2 100644
58223--- a/include/linux/filter.h
58224+++ b/include/linux/filter.h
58225@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
58226
58227 struct sk_buff;
58228 struct sock;
58229+struct bpf_jit_work;
58230
58231 struct sk_filter
58232 {
58233@@ -141,6 +142,9 @@ struct sk_filter
58234 unsigned int len; /* Number of filter blocks */
58235 unsigned int (*bpf_func)(const struct sk_buff *skb,
58236 const struct sock_filter *filter);
58237+#ifdef CONFIG_BPF_JIT
58238+ struct bpf_jit_work *work;
58239+#endif
58240 struct rcu_head rcu;
58241 struct sock_filter insns[0];
58242 };
58243diff --git a/include/linux/firewire.h b/include/linux/firewire.h
58244index 84ccf8e..2e9b14c 100644
58245--- a/include/linux/firewire.h
58246+++ b/include/linux/firewire.h
58247@@ -428,7 +428,7 @@ struct fw_iso_context {
58248 union {
58249 fw_iso_callback_t sc;
58250 fw_iso_mc_callback_t mc;
58251- } callback;
58252+ } __no_const callback;
58253 void *callback_data;
58254 };
58255
58256diff --git a/include/linux/fs.h b/include/linux/fs.h
58257index e0bc4ff..d79c2fa 100644
58258--- a/include/linux/fs.h
58259+++ b/include/linux/fs.h
58260@@ -1608,7 +1608,8 @@ struct file_operations {
58261 int (*setlease)(struct file *, long, struct file_lock **);
58262 long (*fallocate)(struct file *file, int mode, loff_t offset,
58263 loff_t len);
58264-};
58265+} __do_const;
58266+typedef struct file_operations __no_const file_operations_no_const;
58267
58268 struct inode_operations {
58269 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
58270diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
58271index 003dc0f..3c4ea97 100644
58272--- a/include/linux/fs_struct.h
58273+++ b/include/linux/fs_struct.h
58274@@ -6,7 +6,7 @@
58275 #include <linux/seqlock.h>
58276
58277 struct fs_struct {
58278- int users;
58279+ atomic_t users;
58280 spinlock_t lock;
58281 seqcount_t seq;
58282 int umask;
58283diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
58284index ce31408..b1ad003 100644
58285--- a/include/linux/fscache-cache.h
58286+++ b/include/linux/fscache-cache.h
58287@@ -102,7 +102,7 @@ struct fscache_operation {
58288 fscache_operation_release_t release;
58289 };
58290
58291-extern atomic_t fscache_op_debug_id;
58292+extern atomic_unchecked_t fscache_op_debug_id;
58293 extern void fscache_op_work_func(struct work_struct *work);
58294
58295 extern void fscache_enqueue_operation(struct fscache_operation *);
58296@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
58297 {
58298 INIT_WORK(&op->work, fscache_op_work_func);
58299 atomic_set(&op->usage, 1);
58300- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58301+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58302 op->processor = processor;
58303 op->release = release;
58304 INIT_LIST_HEAD(&op->pend_link);
58305diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
58306index 2a53f10..0187fdf 100644
58307--- a/include/linux/fsnotify.h
58308+++ b/include/linux/fsnotify.h
58309@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
58310 */
58311 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
58312 {
58313- return kstrdup(name, GFP_KERNEL);
58314+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
58315 }
58316
58317 /*
58318diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
58319index 91d0e0a3..035666b 100644
58320--- a/include/linux/fsnotify_backend.h
58321+++ b/include/linux/fsnotify_backend.h
58322@@ -105,6 +105,7 @@ struct fsnotify_ops {
58323 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
58324 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
58325 };
58326+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
58327
58328 /*
58329 * A group is a "thing" that wants to receive notification about filesystem
58330diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
58331index c3da42d..c70e0df 100644
58332--- a/include/linux/ftrace_event.h
58333+++ b/include/linux/ftrace_event.h
58334@@ -97,7 +97,7 @@ struct trace_event_functions {
58335 trace_print_func raw;
58336 trace_print_func hex;
58337 trace_print_func binary;
58338-};
58339+} __no_const;
58340
58341 struct trace_event {
58342 struct hlist_node node;
58343@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
58344 extern int trace_add_event_call(struct ftrace_event_call *call);
58345 extern void trace_remove_event_call(struct ftrace_event_call *call);
58346
58347-#define is_signed_type(type) (((type)(-1)) < 0)
58348+#define is_signed_type(type) (((type)(-1)) < (type)1)
58349
58350 int trace_set_clr_event(const char *system, const char *event, int set);
58351
58352diff --git a/include/linux/genhd.h b/include/linux/genhd.h
58353index 6d18f35..ab71e2c 100644
58354--- a/include/linux/genhd.h
58355+++ b/include/linux/genhd.h
58356@@ -185,7 +185,7 @@ struct gendisk {
58357 struct kobject *slave_dir;
58358
58359 struct timer_rand_state *random;
58360- atomic_t sync_io; /* RAID */
58361+ atomic_unchecked_t sync_io; /* RAID */
58362 struct disk_events *ev;
58363 #ifdef CONFIG_BLK_DEV_INTEGRITY
58364 struct blk_integrity *integrity;
58365diff --git a/include/linux/gracl.h b/include/linux/gracl.h
58366new file mode 100644
58367index 0000000..0dc3943
58368--- /dev/null
58369+++ b/include/linux/gracl.h
58370@@ -0,0 +1,317 @@
58371+#ifndef GR_ACL_H
58372+#define GR_ACL_H
58373+
58374+#include <linux/grdefs.h>
58375+#include <linux/resource.h>
58376+#include <linux/capability.h>
58377+#include <linux/dcache.h>
58378+#include <asm/resource.h>
58379+
58380+/* Major status information */
58381+
58382+#define GR_VERSION "grsecurity 2.2.2"
58383+#define GRSECURITY_VERSION 0x2202
58384+
58385+enum {
58386+ GR_SHUTDOWN = 0,
58387+ GR_ENABLE = 1,
58388+ GR_SPROLE = 2,
58389+ GR_RELOAD = 3,
58390+ GR_SEGVMOD = 4,
58391+ GR_STATUS = 5,
58392+ GR_UNSPROLE = 6,
58393+ GR_PASSSET = 7,
58394+ GR_SPROLEPAM = 8,
58395+};
58396+
58397+/* Password setup definitions
58398+ * kernel/grhash.c */
58399+enum {
58400+ GR_PW_LEN = 128,
58401+ GR_SALT_LEN = 16,
58402+ GR_SHA_LEN = 32,
58403+};
58404+
58405+enum {
58406+ GR_SPROLE_LEN = 64,
58407+};
58408+
58409+enum {
58410+ GR_NO_GLOB = 0,
58411+ GR_REG_GLOB,
58412+ GR_CREATE_GLOB
58413+};
58414+
58415+#define GR_NLIMITS 32
58416+
58417+/* Begin Data Structures */
58418+
58419+struct sprole_pw {
58420+ unsigned char *rolename;
58421+ unsigned char salt[GR_SALT_LEN];
58422+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58423+};
58424+
58425+struct name_entry {
58426+ __u32 key;
58427+ ino_t inode;
58428+ dev_t device;
58429+ char *name;
58430+ __u16 len;
58431+ __u8 deleted;
58432+ struct name_entry *prev;
58433+ struct name_entry *next;
58434+};
58435+
58436+struct inodev_entry {
58437+ struct name_entry *nentry;
58438+ struct inodev_entry *prev;
58439+ struct inodev_entry *next;
58440+};
58441+
58442+struct acl_role_db {
58443+ struct acl_role_label **r_hash;
58444+ __u32 r_size;
58445+};
58446+
58447+struct inodev_db {
58448+ struct inodev_entry **i_hash;
58449+ __u32 i_size;
58450+};
58451+
58452+struct name_db {
58453+ struct name_entry **n_hash;
58454+ __u32 n_size;
58455+};
58456+
58457+struct crash_uid {
58458+ uid_t uid;
58459+ unsigned long expires;
58460+};
58461+
58462+struct gr_hash_struct {
58463+ void **table;
58464+ void **nametable;
58465+ void *first;
58466+ __u32 table_size;
58467+ __u32 used_size;
58468+ int type;
58469+};
58470+
58471+/* Userspace Grsecurity ACL data structures */
58472+
58473+struct acl_subject_label {
58474+ char *filename;
58475+ ino_t inode;
58476+ dev_t device;
58477+ __u32 mode;
58478+ kernel_cap_t cap_mask;
58479+ kernel_cap_t cap_lower;
58480+ kernel_cap_t cap_invert_audit;
58481+
58482+ struct rlimit res[GR_NLIMITS];
58483+ __u32 resmask;
58484+
58485+ __u8 user_trans_type;
58486+ __u8 group_trans_type;
58487+ uid_t *user_transitions;
58488+ gid_t *group_transitions;
58489+ __u16 user_trans_num;
58490+ __u16 group_trans_num;
58491+
58492+ __u32 sock_families[2];
58493+ __u32 ip_proto[8];
58494+ __u32 ip_type;
58495+ struct acl_ip_label **ips;
58496+ __u32 ip_num;
58497+ __u32 inaddr_any_override;
58498+
58499+ __u32 crashes;
58500+ unsigned long expires;
58501+
58502+ struct acl_subject_label *parent_subject;
58503+ struct gr_hash_struct *hash;
58504+ struct acl_subject_label *prev;
58505+ struct acl_subject_label *next;
58506+
58507+ struct acl_object_label **obj_hash;
58508+ __u32 obj_hash_size;
58509+ __u16 pax_flags;
58510+};
58511+
58512+struct role_allowed_ip {
58513+ __u32 addr;
58514+ __u32 netmask;
58515+
58516+ struct role_allowed_ip *prev;
58517+ struct role_allowed_ip *next;
58518+};
58519+
58520+struct role_transition {
58521+ char *rolename;
58522+
58523+ struct role_transition *prev;
58524+ struct role_transition *next;
58525+};
58526+
58527+struct acl_role_label {
58528+ char *rolename;
58529+ uid_t uidgid;
58530+ __u16 roletype;
58531+
58532+ __u16 auth_attempts;
58533+ unsigned long expires;
58534+
58535+ struct acl_subject_label *root_label;
58536+ struct gr_hash_struct *hash;
58537+
58538+ struct acl_role_label *prev;
58539+ struct acl_role_label *next;
58540+
58541+ struct role_transition *transitions;
58542+ struct role_allowed_ip *allowed_ips;
58543+ uid_t *domain_children;
58544+ __u16 domain_child_num;
58545+
58546+ struct acl_subject_label **subj_hash;
58547+ __u32 subj_hash_size;
58548+};
58549+
58550+struct user_acl_role_db {
58551+ struct acl_role_label **r_table;
58552+ __u32 num_pointers; /* Number of allocations to track */
58553+ __u32 num_roles; /* Number of roles */
58554+ __u32 num_domain_children; /* Number of domain children */
58555+ __u32 num_subjects; /* Number of subjects */
58556+ __u32 num_objects; /* Number of objects */
58557+};
58558+
58559+struct acl_object_label {
58560+ char *filename;
58561+ ino_t inode;
58562+ dev_t device;
58563+ __u32 mode;
58564+
58565+ struct acl_subject_label *nested;
58566+ struct acl_object_label *globbed;
58567+
58568+ /* next two structures not used */
58569+
58570+ struct acl_object_label *prev;
58571+ struct acl_object_label *next;
58572+};
58573+
58574+struct acl_ip_label {
58575+ char *iface;
58576+ __u32 addr;
58577+ __u32 netmask;
58578+ __u16 low, high;
58579+ __u8 mode;
58580+ __u32 type;
58581+ __u32 proto[8];
58582+
58583+ /* next two structures not used */
58584+
58585+ struct acl_ip_label *prev;
58586+ struct acl_ip_label *next;
58587+};
58588+
58589+struct gr_arg {
58590+ struct user_acl_role_db role_db;
58591+ unsigned char pw[GR_PW_LEN];
58592+ unsigned char salt[GR_SALT_LEN];
58593+ unsigned char sum[GR_SHA_LEN];
58594+ unsigned char sp_role[GR_SPROLE_LEN];
58595+ struct sprole_pw *sprole_pws;
58596+ dev_t segv_device;
58597+ ino_t segv_inode;
58598+ uid_t segv_uid;
58599+ __u16 num_sprole_pws;
58600+ __u16 mode;
58601+};
58602+
58603+struct gr_arg_wrapper {
58604+ struct gr_arg *arg;
58605+ __u32 version;
58606+ __u32 size;
58607+};
58608+
58609+struct subject_map {
58610+ struct acl_subject_label *user;
58611+ struct acl_subject_label *kernel;
58612+ struct subject_map *prev;
58613+ struct subject_map *next;
58614+};
58615+
58616+struct acl_subj_map_db {
58617+ struct subject_map **s_hash;
58618+ __u32 s_size;
58619+};
58620+
58621+/* End Data Structures Section */
58622+
58623+/* Hash functions generated by empirical testing by Brad Spengler
58624+ Makes good use of the low bits of the inode. Generally 0-1 times
58625+ in loop for successful match. 0-3 for unsuccessful match.
58626+ Shift/add algorithm with modulus of table size and an XOR*/
58627+
58628+static __inline__ unsigned int
58629+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58630+{
58631+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58632+}
58633+
58634+ static __inline__ unsigned int
58635+shash(const struct acl_subject_label *userp, const unsigned int sz)
58636+{
58637+ return ((const unsigned long)userp % sz);
58638+}
58639+
58640+static __inline__ unsigned int
58641+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58642+{
58643+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58644+}
58645+
58646+static __inline__ unsigned int
58647+nhash(const char *name, const __u16 len, const unsigned int sz)
58648+{
58649+ return full_name_hash((const unsigned char *)name, len) % sz;
58650+}
58651+
58652+#define FOR_EACH_ROLE_START(role) \
58653+ role = role_list; \
58654+ while (role) {
58655+
58656+#define FOR_EACH_ROLE_END(role) \
58657+ role = role->prev; \
58658+ }
58659+
58660+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58661+ subj = NULL; \
58662+ iter = 0; \
58663+ while (iter < role->subj_hash_size) { \
58664+ if (subj == NULL) \
58665+ subj = role->subj_hash[iter]; \
58666+ if (subj == NULL) { \
58667+ iter++; \
58668+ continue; \
58669+ }
58670+
58671+#define FOR_EACH_SUBJECT_END(subj,iter) \
58672+ subj = subj->next; \
58673+ if (subj == NULL) \
58674+ iter++; \
58675+ }
58676+
58677+
58678+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58679+ subj = role->hash->first; \
58680+ while (subj != NULL) {
58681+
58682+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58683+ subj = subj->next; \
58684+ }
58685+
58686+#endif
58687+
58688diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
58689new file mode 100644
58690index 0000000..323ecf2
58691--- /dev/null
58692+++ b/include/linux/gralloc.h
58693@@ -0,0 +1,9 @@
58694+#ifndef __GRALLOC_H
58695+#define __GRALLOC_H
58696+
58697+void acl_free_all(void);
58698+int acl_alloc_stack_init(unsigned long size);
58699+void *acl_alloc(unsigned long len);
58700+void *acl_alloc_num(unsigned long num, unsigned long len);
58701+
58702+#endif
58703diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
58704new file mode 100644
58705index 0000000..b30e9bc
58706--- /dev/null
58707+++ b/include/linux/grdefs.h
58708@@ -0,0 +1,140 @@
58709+#ifndef GRDEFS_H
58710+#define GRDEFS_H
58711+
58712+/* Begin grsecurity status declarations */
58713+
58714+enum {
58715+ GR_READY = 0x01,
58716+ GR_STATUS_INIT = 0x00 // disabled state
58717+};
58718+
58719+/* Begin ACL declarations */
58720+
58721+/* Role flags */
58722+
58723+enum {
58724+ GR_ROLE_USER = 0x0001,
58725+ GR_ROLE_GROUP = 0x0002,
58726+ GR_ROLE_DEFAULT = 0x0004,
58727+ GR_ROLE_SPECIAL = 0x0008,
58728+ GR_ROLE_AUTH = 0x0010,
58729+ GR_ROLE_NOPW = 0x0020,
58730+ GR_ROLE_GOD = 0x0040,
58731+ GR_ROLE_LEARN = 0x0080,
58732+ GR_ROLE_TPE = 0x0100,
58733+ GR_ROLE_DOMAIN = 0x0200,
58734+ GR_ROLE_PAM = 0x0400,
58735+ GR_ROLE_PERSIST = 0x0800
58736+};
58737+
58738+/* ACL Subject and Object mode flags */
58739+enum {
58740+ GR_DELETED = 0x80000000
58741+};
58742+
58743+/* ACL Object-only mode flags */
58744+enum {
58745+ GR_READ = 0x00000001,
58746+ GR_APPEND = 0x00000002,
58747+ GR_WRITE = 0x00000004,
58748+ GR_EXEC = 0x00000008,
58749+ GR_FIND = 0x00000010,
58750+ GR_INHERIT = 0x00000020,
58751+ GR_SETID = 0x00000040,
58752+ GR_CREATE = 0x00000080,
58753+ GR_DELETE = 0x00000100,
58754+ GR_LINK = 0x00000200,
58755+ GR_AUDIT_READ = 0x00000400,
58756+ GR_AUDIT_APPEND = 0x00000800,
58757+ GR_AUDIT_WRITE = 0x00001000,
58758+ GR_AUDIT_EXEC = 0x00002000,
58759+ GR_AUDIT_FIND = 0x00004000,
58760+ GR_AUDIT_INHERIT= 0x00008000,
58761+ GR_AUDIT_SETID = 0x00010000,
58762+ GR_AUDIT_CREATE = 0x00020000,
58763+ GR_AUDIT_DELETE = 0x00040000,
58764+ GR_AUDIT_LINK = 0x00080000,
58765+ GR_PTRACERD = 0x00100000,
58766+ GR_NOPTRACE = 0x00200000,
58767+ GR_SUPPRESS = 0x00400000,
58768+ GR_NOLEARN = 0x00800000,
58769+ GR_INIT_TRANSFER= 0x01000000
58770+};
58771+
58772+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
58773+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
58774+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
58775+
58776+/* ACL subject-only mode flags */
58777+enum {
58778+ GR_KILL = 0x00000001,
58779+ GR_VIEW = 0x00000002,
58780+ GR_PROTECTED = 0x00000004,
58781+ GR_LEARN = 0x00000008,
58782+ GR_OVERRIDE = 0x00000010,
58783+ /* just a placeholder, this mode is only used in userspace */
58784+ GR_DUMMY = 0x00000020,
58785+ GR_PROTSHM = 0x00000040,
58786+ GR_KILLPROC = 0x00000080,
58787+ GR_KILLIPPROC = 0x00000100,
58788+ /* just a placeholder, this mode is only used in userspace */
58789+ GR_NOTROJAN = 0x00000200,
58790+ GR_PROTPROCFD = 0x00000400,
58791+ GR_PROCACCT = 0x00000800,
58792+ GR_RELAXPTRACE = 0x00001000,
58793+ GR_NESTED = 0x00002000,
58794+ GR_INHERITLEARN = 0x00004000,
58795+ GR_PROCFIND = 0x00008000,
58796+ GR_POVERRIDE = 0x00010000,
58797+ GR_KERNELAUTH = 0x00020000,
58798+ GR_ATSECURE = 0x00040000,
58799+ GR_SHMEXEC = 0x00080000
58800+};
58801+
58802+enum {
58803+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
58804+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
58805+ GR_PAX_ENABLE_MPROTECT = 0x0004,
58806+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
58807+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
58808+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
58809+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
58810+ GR_PAX_DISABLE_MPROTECT = 0x0400,
58811+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
58812+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
58813+};
58814+
58815+enum {
58816+ GR_ID_USER = 0x01,
58817+ GR_ID_GROUP = 0x02,
58818+};
58819+
58820+enum {
58821+ GR_ID_ALLOW = 0x01,
58822+ GR_ID_DENY = 0x02,
58823+};
58824+
58825+#define GR_CRASH_RES 31
58826+#define GR_UIDTABLE_MAX 500
58827+
58828+/* begin resource learning section */
58829+enum {
58830+ GR_RLIM_CPU_BUMP = 60,
58831+ GR_RLIM_FSIZE_BUMP = 50000,
58832+ GR_RLIM_DATA_BUMP = 10000,
58833+ GR_RLIM_STACK_BUMP = 1000,
58834+ GR_RLIM_CORE_BUMP = 10000,
58835+ GR_RLIM_RSS_BUMP = 500000,
58836+ GR_RLIM_NPROC_BUMP = 1,
58837+ GR_RLIM_NOFILE_BUMP = 5,
58838+ GR_RLIM_MEMLOCK_BUMP = 50000,
58839+ GR_RLIM_AS_BUMP = 500000,
58840+ GR_RLIM_LOCKS_BUMP = 2,
58841+ GR_RLIM_SIGPENDING_BUMP = 5,
58842+ GR_RLIM_MSGQUEUE_BUMP = 10000,
58843+ GR_RLIM_NICE_BUMP = 1,
58844+ GR_RLIM_RTPRIO_BUMP = 1,
58845+ GR_RLIM_RTTIME_BUMP = 1000000
58846+};
58847+
58848+#endif
58849diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
58850new file mode 100644
58851index 0000000..da390f1
58852--- /dev/null
58853+++ b/include/linux/grinternal.h
58854@@ -0,0 +1,221 @@
58855+#ifndef __GRINTERNAL_H
58856+#define __GRINTERNAL_H
58857+
58858+#ifdef CONFIG_GRKERNSEC
58859+
58860+#include <linux/fs.h>
58861+#include <linux/mnt_namespace.h>
58862+#include <linux/nsproxy.h>
58863+#include <linux/gracl.h>
58864+#include <linux/grdefs.h>
58865+#include <linux/grmsg.h>
58866+
58867+void gr_add_learn_entry(const char *fmt, ...)
58868+ __attribute__ ((format (printf, 1, 2)));
58869+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
58870+ const struct vfsmount *mnt);
58871+__u32 gr_check_create(const struct dentry *new_dentry,
58872+ const struct dentry *parent,
58873+ const struct vfsmount *mnt, const __u32 mode);
58874+int gr_check_protected_task(const struct task_struct *task);
58875+__u32 to_gr_audit(const __u32 reqmode);
58876+int gr_set_acls(const int type);
58877+int gr_apply_subject_to_task(struct task_struct *task);
58878+int gr_acl_is_enabled(void);
58879+char gr_roletype_to_char(void);
58880+
58881+void gr_handle_alertkill(struct task_struct *task);
58882+char *gr_to_filename(const struct dentry *dentry,
58883+ const struct vfsmount *mnt);
58884+char *gr_to_filename1(const struct dentry *dentry,
58885+ const struct vfsmount *mnt);
58886+char *gr_to_filename2(const struct dentry *dentry,
58887+ const struct vfsmount *mnt);
58888+char *gr_to_filename3(const struct dentry *dentry,
58889+ const struct vfsmount *mnt);
58890+
58891+extern int grsec_enable_ptrace_readexec;
58892+extern int grsec_enable_harden_ptrace;
58893+extern int grsec_enable_link;
58894+extern int grsec_enable_fifo;
58895+extern int grsec_enable_execve;
58896+extern int grsec_enable_shm;
58897+extern int grsec_enable_execlog;
58898+extern int grsec_enable_signal;
58899+extern int grsec_enable_audit_ptrace;
58900+extern int grsec_enable_forkfail;
58901+extern int grsec_enable_time;
58902+extern int grsec_enable_rofs;
58903+extern int grsec_enable_chroot_shmat;
58904+extern int grsec_enable_chroot_mount;
58905+extern int grsec_enable_chroot_double;
58906+extern int grsec_enable_chroot_pivot;
58907+extern int grsec_enable_chroot_chdir;
58908+extern int grsec_enable_chroot_chmod;
58909+extern int grsec_enable_chroot_mknod;
58910+extern int grsec_enable_chroot_fchdir;
58911+extern int grsec_enable_chroot_nice;
58912+extern int grsec_enable_chroot_execlog;
58913+extern int grsec_enable_chroot_caps;
58914+extern int grsec_enable_chroot_sysctl;
58915+extern int grsec_enable_chroot_unix;
58916+extern int grsec_enable_tpe;
58917+extern int grsec_tpe_gid;
58918+extern int grsec_enable_tpe_all;
58919+extern int grsec_enable_tpe_invert;
58920+extern int grsec_enable_socket_all;
58921+extern int grsec_socket_all_gid;
58922+extern int grsec_enable_socket_client;
58923+extern int grsec_socket_client_gid;
58924+extern int grsec_enable_socket_server;
58925+extern int grsec_socket_server_gid;
58926+extern int grsec_audit_gid;
58927+extern int grsec_enable_group;
58928+extern int grsec_enable_audit_textrel;
58929+extern int grsec_enable_log_rwxmaps;
58930+extern int grsec_enable_mount;
58931+extern int grsec_enable_chdir;
58932+extern int grsec_resource_logging;
58933+extern int grsec_enable_blackhole;
58934+extern int grsec_lastack_retries;
58935+extern int grsec_enable_brute;
58936+extern int grsec_lock;
58937+
58938+extern spinlock_t grsec_alert_lock;
58939+extern unsigned long grsec_alert_wtime;
58940+extern unsigned long grsec_alert_fyet;
58941+
58942+extern spinlock_t grsec_audit_lock;
58943+
58944+extern rwlock_t grsec_exec_file_lock;
58945+
58946+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
58947+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
58948+ (tsk)->exec_file->f_vfsmnt) : "/")
58949+
58950+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
58951+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
58952+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58953+
58954+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
58955+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
58956+ (tsk)->exec_file->f_vfsmnt) : "/")
58957+
58958+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
58959+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
58960+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58961+
58962+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58963+
58964+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58965+
58966+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
58967+ (task)->pid, (cred)->uid, \
58968+ (cred)->euid, (cred)->gid, (cred)->egid, \
58969+ gr_parent_task_fullpath(task), \
58970+ (task)->real_parent->comm, (task)->real_parent->pid, \
58971+ (pcred)->uid, (pcred)->euid, \
58972+ (pcred)->gid, (pcred)->egid
58973+
58974+#define GR_CHROOT_CAPS {{ \
58975+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
58976+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
58977+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
58978+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
58979+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
58980+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
58981+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58982+
58983+#define security_learn(normal_msg,args...) \
58984+({ \
58985+ read_lock(&grsec_exec_file_lock); \
58986+ gr_add_learn_entry(normal_msg "\n", ## args); \
58987+ read_unlock(&grsec_exec_file_lock); \
58988+})
58989+
58990+enum {
58991+ GR_DO_AUDIT,
58992+ GR_DONT_AUDIT,
58993+ /* used for non-audit messages that we shouldn't kill the task on */
58994+ GR_DONT_AUDIT_GOOD
58995+};
58996+
58997+enum {
58998+ GR_TTYSNIFF,
58999+ GR_RBAC,
59000+ GR_RBAC_STR,
59001+ GR_STR_RBAC,
59002+ GR_RBAC_MODE2,
59003+ GR_RBAC_MODE3,
59004+ GR_FILENAME,
59005+ GR_SYSCTL_HIDDEN,
59006+ GR_NOARGS,
59007+ GR_ONE_INT,
59008+ GR_ONE_INT_TWO_STR,
59009+ GR_ONE_STR,
59010+ GR_STR_INT,
59011+ GR_TWO_STR_INT,
59012+ GR_TWO_INT,
59013+ GR_TWO_U64,
59014+ GR_THREE_INT,
59015+ GR_FIVE_INT_TWO_STR,
59016+ GR_TWO_STR,
59017+ GR_THREE_STR,
59018+ GR_FOUR_STR,
59019+ GR_STR_FILENAME,
59020+ GR_FILENAME_STR,
59021+ GR_FILENAME_TWO_INT,
59022+ GR_FILENAME_TWO_INT_STR,
59023+ GR_TEXTREL,
59024+ GR_PTRACE,
59025+ GR_RESOURCE,
59026+ GR_CAP,
59027+ GR_SIG,
59028+ GR_SIG2,
59029+ GR_CRASH1,
59030+ GR_CRASH2,
59031+ GR_PSACCT,
59032+ GR_RWXMAP
59033+};
59034+
59035+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59036+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59037+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59038+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59039+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59040+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59041+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59042+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59043+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59044+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59045+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59046+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59047+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59048+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59049+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59050+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59051+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59052+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59053+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59054+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59055+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59056+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59057+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59058+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59059+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59060+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59061+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59062+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59063+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59064+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59065+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59066+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59067+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59068+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59069+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59070+
59071+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59072+
59073+#endif
59074+
59075+#endif
59076diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
59077new file mode 100644
59078index 0000000..b3347e2
59079--- /dev/null
59080+++ b/include/linux/grmsg.h
59081@@ -0,0 +1,109 @@
59082+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59083+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59084+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59085+#define GR_STOPMOD_MSG "denied modification of module state by "
59086+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59087+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59088+#define GR_IOPERM_MSG "denied use of ioperm() by "
59089+#define GR_IOPL_MSG "denied use of iopl() by "
59090+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59091+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59092+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59093+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59094+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59095+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59096+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59097+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59098+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59099+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59100+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59101+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59102+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59103+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59104+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59105+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59106+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59107+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59108+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59109+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59110+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59111+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59112+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59113+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59114+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59115+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59116+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.64s) of %.950s by "
59117+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59118+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59119+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59120+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59121+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59122+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59123+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59124+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59125+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
59126+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59127+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59128+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59129+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59130+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59131+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59132+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59133+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59134+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
59135+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59136+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59137+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59138+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59139+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59140+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59141+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59142+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59143+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59144+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59145+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59146+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59147+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59148+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59149+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59150+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59151+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59152+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59153+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59154+#define GR_FAILFORK_MSG "failed fork with errno %s by "
59155+#define GR_NICE_CHROOT_MSG "denied priority change by "
59156+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59157+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59158+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59159+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59160+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59161+#define GR_TIME_MSG "time set by "
59162+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59163+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59164+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59165+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59166+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59167+#define GR_BIND_MSG "denied bind() by "
59168+#define GR_CONNECT_MSG "denied connect() by "
59169+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59170+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59171+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59172+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59173+#define GR_CAP_ACL_MSG "use of %s denied for "
59174+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
59175+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59176+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59177+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59178+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59179+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59180+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59181+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59182+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59183+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59184+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59185+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59186+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59187+#define GR_VM86_MSG "denied use of vm86 by "
59188+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59189+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
59190+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
59191diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
59192new file mode 100644
59193index 0000000..eb4885f
59194--- /dev/null
59195+++ b/include/linux/grsecurity.h
59196@@ -0,0 +1,233 @@
59197+#ifndef GR_SECURITY_H
59198+#define GR_SECURITY_H
59199+#include <linux/fs.h>
59200+#include <linux/fs_struct.h>
59201+#include <linux/binfmts.h>
59202+#include <linux/gracl.h>
59203+
59204+/* notify of brain-dead configs */
59205+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59206+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59207+#endif
59208+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59209+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59210+#endif
59211+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59212+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59213+#endif
59214+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59215+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59216+#endif
59217+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59218+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59219+#endif
59220+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59221+#error "CONFIG_PAX enabled, but no PaX options are enabled."
59222+#endif
59223+
59224+#include <linux/compat.h>
59225+
59226+struct user_arg_ptr {
59227+#ifdef CONFIG_COMPAT
59228+ bool is_compat;
59229+#endif
59230+ union {
59231+ const char __user *const __user *native;
59232+#ifdef CONFIG_COMPAT
59233+ compat_uptr_t __user *compat;
59234+#endif
59235+ } ptr;
59236+};
59237+
59238+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59239+void gr_handle_brute_check(void);
59240+void gr_handle_kernel_exploit(void);
59241+int gr_process_user_ban(void);
59242+
59243+char gr_roletype_to_char(void);
59244+
59245+int gr_acl_enable_at_secure(void);
59246+
59247+int gr_check_user_change(int real, int effective, int fs);
59248+int gr_check_group_change(int real, int effective, int fs);
59249+
59250+void gr_del_task_from_ip_table(struct task_struct *p);
59251+
59252+int gr_pid_is_chrooted(struct task_struct *p);
59253+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59254+int gr_handle_chroot_nice(void);
59255+int gr_handle_chroot_sysctl(const int op);
59256+int gr_handle_chroot_setpriority(struct task_struct *p,
59257+ const int niceval);
59258+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59259+int gr_handle_chroot_chroot(const struct dentry *dentry,
59260+ const struct vfsmount *mnt);
59261+void gr_handle_chroot_chdir(struct path *path);
59262+int gr_handle_chroot_chmod(const struct dentry *dentry,
59263+ const struct vfsmount *mnt, const int mode);
59264+int gr_handle_chroot_mknod(const struct dentry *dentry,
59265+ const struct vfsmount *mnt, const int mode);
59266+int gr_handle_chroot_mount(const struct dentry *dentry,
59267+ const struct vfsmount *mnt,
59268+ const char *dev_name);
59269+int gr_handle_chroot_pivot(void);
59270+int gr_handle_chroot_unix(const pid_t pid);
59271+
59272+int gr_handle_rawio(const struct inode *inode);
59273+
59274+void gr_handle_ioperm(void);
59275+void gr_handle_iopl(void);
59276+
59277+int gr_tpe_allow(const struct file *file);
59278+
59279+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59280+void gr_clear_chroot_entries(struct task_struct *task);
59281+
59282+void gr_log_forkfail(const int retval);
59283+void gr_log_timechange(void);
59284+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59285+void gr_log_chdir(const struct dentry *dentry,
59286+ const struct vfsmount *mnt);
59287+void gr_log_chroot_exec(const struct dentry *dentry,
59288+ const struct vfsmount *mnt);
59289+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
59290+void gr_log_remount(const char *devname, const int retval);
59291+void gr_log_unmount(const char *devname, const int retval);
59292+void gr_log_mount(const char *from, const char *to, const int retval);
59293+void gr_log_textrel(struct vm_area_struct *vma);
59294+void gr_log_rwxmmap(struct file *file);
59295+void gr_log_rwxmprotect(struct file *file);
59296+
59297+int gr_handle_follow_link(const struct inode *parent,
59298+ const struct inode *inode,
59299+ const struct dentry *dentry,
59300+ const struct vfsmount *mnt);
59301+int gr_handle_fifo(const struct dentry *dentry,
59302+ const struct vfsmount *mnt,
59303+ const struct dentry *dir, const int flag,
59304+ const int acc_mode);
59305+int gr_handle_hardlink(const struct dentry *dentry,
59306+ const struct vfsmount *mnt,
59307+ struct inode *inode,
59308+ const int mode, const char *to);
59309+
59310+int gr_is_capable(const int cap);
59311+int gr_is_capable_nolog(const int cap);
59312+void gr_learn_resource(const struct task_struct *task, const int limit,
59313+ const unsigned long wanted, const int gt);
59314+void gr_copy_label(struct task_struct *tsk);
59315+void gr_handle_crash(struct task_struct *task, const int sig);
59316+int gr_handle_signal(const struct task_struct *p, const int sig);
59317+int gr_check_crash_uid(const uid_t uid);
59318+int gr_check_protected_task(const struct task_struct *task);
59319+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59320+int gr_acl_handle_mmap(const struct file *file,
59321+ const unsigned long prot);
59322+int gr_acl_handle_mprotect(const struct file *file,
59323+ const unsigned long prot);
59324+int gr_check_hidden_task(const struct task_struct *tsk);
59325+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59326+ const struct vfsmount *mnt);
59327+__u32 gr_acl_handle_utime(const struct dentry *dentry,
59328+ const struct vfsmount *mnt);
59329+__u32 gr_acl_handle_access(const struct dentry *dentry,
59330+ const struct vfsmount *mnt, const int fmode);
59331+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59332+ const struct vfsmount *mnt, mode_t mode);
59333+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59334+ const struct vfsmount *mnt, mode_t mode);
59335+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59336+ const struct vfsmount *mnt);
59337+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59338+ const struct vfsmount *mnt);
59339+int gr_handle_ptrace(struct task_struct *task, const long request);
59340+int gr_handle_proc_ptrace(struct task_struct *task);
59341+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59342+ const struct vfsmount *mnt);
59343+int gr_check_crash_exec(const struct file *filp);
59344+int gr_acl_is_enabled(void);
59345+void gr_set_kernel_label(struct task_struct *task);
59346+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59347+ const gid_t gid);
59348+int gr_set_proc_label(const struct dentry *dentry,
59349+ const struct vfsmount *mnt,
59350+ const int unsafe_flags);
59351+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59352+ const struct vfsmount *mnt);
59353+__u32 gr_acl_handle_open(const struct dentry *dentry,
59354+ const struct vfsmount *mnt, int acc_mode);
59355+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59356+ const struct dentry *p_dentry,
59357+ const struct vfsmount *p_mnt,
59358+ int open_flags, int acc_mode, const int imode);
59359+void gr_handle_create(const struct dentry *dentry,
59360+ const struct vfsmount *mnt);
59361+void gr_handle_proc_create(const struct dentry *dentry,
59362+ const struct inode *inode);
59363+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59364+ const struct dentry *parent_dentry,
59365+ const struct vfsmount *parent_mnt,
59366+ const int mode);
59367+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59368+ const struct dentry *parent_dentry,
59369+ const struct vfsmount *parent_mnt);
59370+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59371+ const struct vfsmount *mnt);
59372+void gr_handle_delete(const ino_t ino, const dev_t dev);
59373+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59374+ const struct vfsmount *mnt);
59375+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59376+ const struct dentry *parent_dentry,
59377+ const struct vfsmount *parent_mnt,
59378+ const char *from);
59379+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59380+ const struct dentry *parent_dentry,
59381+ const struct vfsmount *parent_mnt,
59382+ const struct dentry *old_dentry,
59383+ const struct vfsmount *old_mnt, const char *to);
59384+int gr_acl_handle_rename(struct dentry *new_dentry,
59385+ struct dentry *parent_dentry,
59386+ const struct vfsmount *parent_mnt,
59387+ struct dentry *old_dentry,
59388+ struct inode *old_parent_inode,
59389+ struct vfsmount *old_mnt, const char *newname);
59390+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59391+ struct dentry *old_dentry,
59392+ struct dentry *new_dentry,
59393+ struct vfsmount *mnt, const __u8 replace);
59394+__u32 gr_check_link(const struct dentry *new_dentry,
59395+ const struct dentry *parent_dentry,
59396+ const struct vfsmount *parent_mnt,
59397+ const struct dentry *old_dentry,
59398+ const struct vfsmount *old_mnt);
59399+int gr_acl_handle_filldir(const struct file *file, const char *name,
59400+ const unsigned int namelen, const ino_t ino);
59401+
59402+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59403+ const struct vfsmount *mnt);
59404+void gr_acl_handle_exit(void);
59405+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59406+int gr_acl_handle_procpidmem(const struct task_struct *task);
59407+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59408+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59409+void gr_audit_ptrace(struct task_struct *task);
59410+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59411+
59412+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
59413+
59414+#ifdef CONFIG_GRKERNSEC
59415+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59416+void gr_handle_vm86(void);
59417+void gr_handle_mem_readwrite(u64 from, u64 to);
59418+
59419+extern int grsec_enable_dmesg;
59420+extern int grsec_disable_privio;
59421+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59422+extern int grsec_enable_chroot_findtask;
59423+#endif
59424+#ifdef CONFIG_GRKERNSEC_SETXID
59425+extern int grsec_enable_setxid;
59426+#endif
59427+#endif
59428+
59429+#endif
59430diff --git a/include/linux/grsock.h b/include/linux/grsock.h
59431new file mode 100644
59432index 0000000..e7ffaaf
59433--- /dev/null
59434+++ b/include/linux/grsock.h
59435@@ -0,0 +1,19 @@
59436+#ifndef __GRSOCK_H
59437+#define __GRSOCK_H
59438+
59439+extern void gr_attach_curr_ip(const struct sock *sk);
59440+extern int gr_handle_sock_all(const int family, const int type,
59441+ const int protocol);
59442+extern int gr_handle_sock_server(const struct sockaddr *sck);
59443+extern int gr_handle_sock_server_other(const struct sock *sck);
59444+extern int gr_handle_sock_client(const struct sockaddr *sck);
59445+extern int gr_search_connect(struct socket * sock,
59446+ struct sockaddr_in * addr);
59447+extern int gr_search_bind(struct socket * sock,
59448+ struct sockaddr_in * addr);
59449+extern int gr_search_listen(struct socket * sock);
59450+extern int gr_search_accept(struct socket * sock);
59451+extern int gr_search_socket(const int domain, const int type,
59452+ const int protocol);
59453+
59454+#endif
59455diff --git a/include/linux/hid.h b/include/linux/hid.h
59456index c235e4e..f0cf7a0 100644
59457--- a/include/linux/hid.h
59458+++ b/include/linux/hid.h
59459@@ -679,7 +679,7 @@ struct hid_ll_driver {
59460 unsigned int code, int value);
59461
59462 int (*parse)(struct hid_device *hdev);
59463-};
59464+} __no_const;
59465
59466 #define PM_HINT_FULLON 1<<5
59467 #define PM_HINT_NORMAL 1<<1
59468diff --git a/include/linux/highmem.h b/include/linux/highmem.h
59469index 3a93f73..b19d0b3 100644
59470--- a/include/linux/highmem.h
59471+++ b/include/linux/highmem.h
59472@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
59473 kunmap_atomic(kaddr, KM_USER0);
59474 }
59475
59476+static inline void sanitize_highpage(struct page *page)
59477+{
59478+ void *kaddr;
59479+ unsigned long flags;
59480+
59481+ local_irq_save(flags);
59482+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59483+ clear_page(kaddr);
59484+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59485+ local_irq_restore(flags);
59486+}
59487+
59488 static inline void zero_user_segments(struct page *page,
59489 unsigned start1, unsigned end1,
59490 unsigned start2, unsigned end2)
59491diff --git a/include/linux/i2c.h b/include/linux/i2c.h
59492index 07d103a..04ec65b 100644
59493--- a/include/linux/i2c.h
59494+++ b/include/linux/i2c.h
59495@@ -364,6 +364,7 @@ struct i2c_algorithm {
59496 /* To determine what the adapter supports */
59497 u32 (*functionality) (struct i2c_adapter *);
59498 };
59499+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
59500
59501 /*
59502 * i2c_adapter is the structure used to identify a physical i2c bus along
59503diff --git a/include/linux/i2o.h b/include/linux/i2o.h
59504index a6deef4..c56a7f2 100644
59505--- a/include/linux/i2o.h
59506+++ b/include/linux/i2o.h
59507@@ -564,7 +564,7 @@ struct i2o_controller {
59508 struct i2o_device *exec; /* Executive */
59509 #if BITS_PER_LONG == 64
59510 spinlock_t context_list_lock; /* lock for context_list */
59511- atomic_t context_list_counter; /* needed for unique contexts */
59512+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59513 struct list_head context_list; /* list of context id's
59514 and pointers */
59515 #endif
59516diff --git a/include/linux/init.h b/include/linux/init.h
59517index 9146f39..885354d 100644
59518--- a/include/linux/init.h
59519+++ b/include/linux/init.h
59520@@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
59521
59522 /* Each module must use one module_init(). */
59523 #define module_init(initfn) \
59524- static inline initcall_t __inittest(void) \
59525+ static inline __used initcall_t __inittest(void) \
59526 { return initfn; } \
59527 int init_module(void) __attribute__((alias(#initfn)));
59528
59529 /* This is only required if you want to be unloadable. */
59530 #define module_exit(exitfn) \
59531- static inline exitcall_t __exittest(void) \
59532+ static inline __used exitcall_t __exittest(void) \
59533 { return exitfn; } \
59534 void cleanup_module(void) __attribute__((alias(#exitfn)));
59535
59536diff --git a/include/linux/init_task.h b/include/linux/init_task.h
59537index 32574ee..00d4ef1 100644
59538--- a/include/linux/init_task.h
59539+++ b/include/linux/init_task.h
59540@@ -128,6 +128,12 @@ extern struct cred init_cred;
59541
59542 #define INIT_TASK_COMM "swapper"
59543
59544+#ifdef CONFIG_X86
59545+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59546+#else
59547+#define INIT_TASK_THREAD_INFO
59548+#endif
59549+
59550 /*
59551 * INIT_TASK is used to set up the first task table, touch at
59552 * your own risk!. Base=0, limit=0x1fffff (=2MB)
59553@@ -166,6 +172,7 @@ extern struct cred init_cred;
59554 RCU_INIT_POINTER(.cred, &init_cred), \
59555 .comm = INIT_TASK_COMM, \
59556 .thread = INIT_THREAD, \
59557+ INIT_TASK_THREAD_INFO \
59558 .fs = &init_fs, \
59559 .files = &init_files, \
59560 .signal = &init_signals, \
59561diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
59562index e6ca56d..8583707 100644
59563--- a/include/linux/intel-iommu.h
59564+++ b/include/linux/intel-iommu.h
59565@@ -296,7 +296,7 @@ struct iommu_flush {
59566 u8 fm, u64 type);
59567 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59568 unsigned int size_order, u64 type);
59569-};
59570+} __no_const;
59571
59572 enum {
59573 SR_DMAR_FECTL_REG,
59574diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
59575index a64b00e..464d8bc 100644
59576--- a/include/linux/interrupt.h
59577+++ b/include/linux/interrupt.h
59578@@ -441,7 +441,7 @@ enum
59579 /* map softirq index to softirq name. update 'softirq_to_name' in
59580 * kernel/softirq.c when adding a new softirq.
59581 */
59582-extern char *softirq_to_name[NR_SOFTIRQS];
59583+extern const char * const softirq_to_name[NR_SOFTIRQS];
59584
59585 /* softirq mask and active fields moved to irq_cpustat_t in
59586 * asm/hardirq.h to get better cache usage. KAO
59587@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
59588
59589 struct softirq_action
59590 {
59591- void (*action)(struct softirq_action *);
59592+ void (*action)(void);
59593 };
59594
59595 asmlinkage void do_softirq(void);
59596 asmlinkage void __do_softirq(void);
59597-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59598+extern void open_softirq(int nr, void (*action)(void));
59599 extern void softirq_init(void);
59600 static inline void __raise_softirq_irqoff(unsigned int nr)
59601 {
59602diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
59603index 3875719..4cd454c 100644
59604--- a/include/linux/kallsyms.h
59605+++ b/include/linux/kallsyms.h
59606@@ -15,7 +15,8 @@
59607
59608 struct module;
59609
59610-#ifdef CONFIG_KALLSYMS
59611+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59612+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59613 /* Lookup the address for a symbol. Returns 0 if not found. */
59614 unsigned long kallsyms_lookup_name(const char *name);
59615
59616@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
59617 /* Stupid that this does nothing, but I didn't create this mess. */
59618 #define __print_symbol(fmt, addr)
59619 #endif /*CONFIG_KALLSYMS*/
59620+#else /* when included by kallsyms.c, vsnprintf.c, or
59621+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59622+extern void __print_symbol(const char *fmt, unsigned long address);
59623+extern int sprint_backtrace(char *buffer, unsigned long address);
59624+extern int sprint_symbol(char *buffer, unsigned long address);
59625+const char *kallsyms_lookup(unsigned long addr,
59626+ unsigned long *symbolsize,
59627+ unsigned long *offset,
59628+ char **modname, char *namebuf);
59629+#endif
59630
59631 /* This macro allows us to keep printk typechecking */
59632 static __printf(1, 2)
59633diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
59634index fa39183..40160be 100644
59635--- a/include/linux/kgdb.h
59636+++ b/include/linux/kgdb.h
59637@@ -53,7 +53,7 @@ extern int kgdb_connected;
59638 extern int kgdb_io_module_registered;
59639
59640 extern atomic_t kgdb_setting_breakpoint;
59641-extern atomic_t kgdb_cpu_doing_single_step;
59642+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59643
59644 extern struct task_struct *kgdb_usethread;
59645 extern struct task_struct *kgdb_contthread;
59646@@ -251,7 +251,7 @@ struct kgdb_arch {
59647 void (*disable_hw_break)(struct pt_regs *regs);
59648 void (*remove_all_hw_break)(void);
59649 void (*correct_hw_break)(void);
59650-};
59651+} __do_const;
59652
59653 /**
59654 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
59655@@ -276,7 +276,7 @@ struct kgdb_io {
59656 void (*pre_exception) (void);
59657 void (*post_exception) (void);
59658 int is_console;
59659-};
59660+} __do_const;
59661
59662 extern struct kgdb_arch arch_kgdb_ops;
59663
59664diff --git a/include/linux/kmod.h b/include/linux/kmod.h
59665index b16f653..eb908f4 100644
59666--- a/include/linux/kmod.h
59667+++ b/include/linux/kmod.h
59668@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
59669 * usually useless though. */
59670 extern __printf(2, 3)
59671 int __request_module(bool wait, const char *name, ...);
59672+extern __printf(3, 4)
59673+int ___request_module(bool wait, char *param_name, const char *name, ...);
59674 #define request_module(mod...) __request_module(true, mod)
59675 #define request_module_nowait(mod...) __request_module(false, mod)
59676 #define try_then_request_module(x, mod...) \
59677diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
59678index d526231..086e89b 100644
59679--- a/include/linux/kvm_host.h
59680+++ b/include/linux/kvm_host.h
59681@@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
59682 void vcpu_load(struct kvm_vcpu *vcpu);
59683 void vcpu_put(struct kvm_vcpu *vcpu);
59684
59685-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59686+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
59687 struct module *module);
59688 void kvm_exit(void);
59689
59690@@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
59691 struct kvm_guest_debug *dbg);
59692 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59693
59694-int kvm_arch_init(void *opaque);
59695+int kvm_arch_init(const void *opaque);
59696 void kvm_arch_exit(void);
59697
59698 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59699diff --git a/include/linux/libata.h b/include/linux/libata.h
59700index cafc09a..d7e7829 100644
59701--- a/include/linux/libata.h
59702+++ b/include/linux/libata.h
59703@@ -909,7 +909,7 @@ struct ata_port_operations {
59704 * fields must be pointers.
59705 */
59706 const struct ata_port_operations *inherits;
59707-};
59708+} __do_const;
59709
59710 struct ata_port_info {
59711 unsigned long flags;
59712diff --git a/include/linux/mca.h b/include/linux/mca.h
59713index 3797270..7765ede 100644
59714--- a/include/linux/mca.h
59715+++ b/include/linux/mca.h
59716@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
59717 int region);
59718 void * (*mca_transform_memory)(struct mca_device *,
59719 void *memory);
59720-};
59721+} __no_const;
59722
59723 struct mca_bus {
59724 u64 default_dma_mask;
59725diff --git a/include/linux/memory.h b/include/linux/memory.h
59726index 935699b..11042cc 100644
59727--- a/include/linux/memory.h
59728+++ b/include/linux/memory.h
59729@@ -144,7 +144,7 @@ struct memory_accessor {
59730 size_t count);
59731 ssize_t (*write)(struct memory_accessor *, const char *buf,
59732 off_t offset, size_t count);
59733-};
59734+} __no_const;
59735
59736 /*
59737 * Kernel text modification mutex, used for code patching. Users of this lock
59738diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
59739index 9970337..9444122 100644
59740--- a/include/linux/mfd/abx500.h
59741+++ b/include/linux/mfd/abx500.h
59742@@ -188,6 +188,7 @@ struct abx500_ops {
59743 int (*event_registers_startup_state_get) (struct device *, u8 *);
59744 int (*startup_irq_enabled) (struct device *, unsigned int);
59745 };
59746+typedef struct abx500_ops __no_const abx500_ops_no_const;
59747
59748 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
59749 void abx500_remove_ops(struct device *dev);
59750diff --git a/include/linux/mm.h b/include/linux/mm.h
59751index 4baadd1..2e0b45e 100644
59752--- a/include/linux/mm.h
59753+++ b/include/linux/mm.h
59754@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
59755
59756 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
59757 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
59758+
59759+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
59760+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
59761+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
59762+#else
59763 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
59764+#endif
59765+
59766 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
59767 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
59768
59769@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
59770 int set_page_dirty_lock(struct page *page);
59771 int clear_page_dirty_for_io(struct page *page);
59772
59773-/* Is the vma a continuation of the stack vma above it? */
59774-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
59775-{
59776- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
59777-}
59778-
59779-static inline int stack_guard_page_start(struct vm_area_struct *vma,
59780- unsigned long addr)
59781-{
59782- return (vma->vm_flags & VM_GROWSDOWN) &&
59783- (vma->vm_start == addr) &&
59784- !vma_growsdown(vma->vm_prev, addr);
59785-}
59786-
59787-/* Is the vma a continuation of the stack vma below it? */
59788-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
59789-{
59790- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
59791-}
59792-
59793-static inline int stack_guard_page_end(struct vm_area_struct *vma,
59794- unsigned long addr)
59795-{
59796- return (vma->vm_flags & VM_GROWSUP) &&
59797- (vma->vm_end == addr) &&
59798- !vma_growsup(vma->vm_next, addr);
59799-}
59800-
59801 extern unsigned long move_page_tables(struct vm_area_struct *vma,
59802 unsigned long old_addr, struct vm_area_struct *new_vma,
59803 unsigned long new_addr, unsigned long len);
59804@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
59805 }
59806 #endif
59807
59808+#ifdef CONFIG_MMU
59809+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
59810+#else
59811+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
59812+{
59813+ return __pgprot(0);
59814+}
59815+#endif
59816+
59817 int vma_wants_writenotify(struct vm_area_struct *vma);
59818
59819 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
59820@@ -1419,6 +1407,7 @@ out:
59821 }
59822
59823 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
59824+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
59825
59826 extern unsigned long do_brk(unsigned long, unsigned long);
59827
59828@@ -1476,6 +1465,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
59829 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
59830 struct vm_area_struct **pprev);
59831
59832+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
59833+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
59834+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
59835+
59836 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
59837 NULL if none. Assume start_addr < end_addr. */
59838 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
59839@@ -1492,15 +1485,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
59840 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
59841 }
59842
59843-#ifdef CONFIG_MMU
59844-pgprot_t vm_get_page_prot(unsigned long vm_flags);
59845-#else
59846-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
59847-{
59848- return __pgprot(0);
59849-}
59850-#endif
59851-
59852 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
59853 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
59854 unsigned long pfn, unsigned long size, pgprot_t);
59855@@ -1614,7 +1598,7 @@ extern int unpoison_memory(unsigned long pfn);
59856 extern int sysctl_memory_failure_early_kill;
59857 extern int sysctl_memory_failure_recovery;
59858 extern void shake_page(struct page *p, int access);
59859-extern atomic_long_t mce_bad_pages;
59860+extern atomic_long_unchecked_t mce_bad_pages;
59861 extern int soft_offline_page(struct page *page, int flags);
59862
59863 extern void dump_page(struct page *page);
59864@@ -1628,5 +1612,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
59865 unsigned int pages_per_huge_page);
59866 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
59867
59868+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59869+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
59870+#else
59871+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
59872+#endif
59873+
59874 #endif /* __KERNEL__ */
59875 #endif /* _LINUX_MM_H */
59876diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
59877index 5b42f1b..759e4b4 100644
59878--- a/include/linux/mm_types.h
59879+++ b/include/linux/mm_types.h
59880@@ -253,6 +253,8 @@ struct vm_area_struct {
59881 #ifdef CONFIG_NUMA
59882 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
59883 #endif
59884+
59885+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
59886 };
59887
59888 struct core_thread {
59889@@ -389,6 +391,24 @@ struct mm_struct {
59890 #ifdef CONFIG_CPUMASK_OFFSTACK
59891 struct cpumask cpumask_allocation;
59892 #endif
59893+
59894+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59895+ unsigned long pax_flags;
59896+#endif
59897+
59898+#ifdef CONFIG_PAX_DLRESOLVE
59899+ unsigned long call_dl_resolve;
59900+#endif
59901+
59902+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59903+ unsigned long call_syscall;
59904+#endif
59905+
59906+#ifdef CONFIG_PAX_ASLR
59907+ unsigned long delta_mmap; /* randomized offset */
59908+ unsigned long delta_stack; /* randomized offset */
59909+#endif
59910+
59911 };
59912
59913 static inline void mm_init_cpumask(struct mm_struct *mm)
59914diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
59915index 1d1b1e1..2a13c78 100644
59916--- a/include/linux/mmu_notifier.h
59917+++ b/include/linux/mmu_notifier.h
59918@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
59919 */
59920 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
59921 ({ \
59922- pte_t __pte; \
59923+ pte_t ___pte; \
59924 struct vm_area_struct *___vma = __vma; \
59925 unsigned long ___address = __address; \
59926- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
59927+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
59928 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
59929- __pte; \
59930+ ___pte; \
59931 })
59932
59933 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
59934diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
59935index 188cb2f..d78409b 100644
59936--- a/include/linux/mmzone.h
59937+++ b/include/linux/mmzone.h
59938@@ -369,7 +369,7 @@ struct zone {
59939 unsigned long flags; /* zone flags, see below */
59940
59941 /* Zone statistics */
59942- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59943+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
59944
59945 /*
59946 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
59947diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
59948index 468819c..17b9db3 100644
59949--- a/include/linux/mod_devicetable.h
59950+++ b/include/linux/mod_devicetable.h
59951@@ -12,7 +12,7 @@
59952 typedef unsigned long kernel_ulong_t;
59953 #endif
59954
59955-#define PCI_ANY_ID (~0)
59956+#define PCI_ANY_ID ((__u16)~0)
59957
59958 struct pci_device_id {
59959 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
59960@@ -131,7 +131,7 @@ struct usb_device_id {
59961 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
59962 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
59963
59964-#define HID_ANY_ID (~0)
59965+#define HID_ANY_ID (~0U)
59966
59967 struct hid_device_id {
59968 __u16 bus;
59969diff --git a/include/linux/module.h b/include/linux/module.h
59970index 3cb7839..511cb87 100644
59971--- a/include/linux/module.h
59972+++ b/include/linux/module.h
59973@@ -17,6 +17,7 @@
59974 #include <linux/moduleparam.h>
59975 #include <linux/tracepoint.h>
59976 #include <linux/export.h>
59977+#include <linux/fs.h>
59978
59979 #include <linux/percpu.h>
59980 #include <asm/module.h>
59981@@ -261,19 +262,16 @@ struct module
59982 int (*init)(void);
59983
59984 /* If this is non-NULL, vfree after init() returns */
59985- void *module_init;
59986+ void *module_init_rx, *module_init_rw;
59987
59988 /* Here is the actual code + data, vfree'd on unload. */
59989- void *module_core;
59990+ void *module_core_rx, *module_core_rw;
59991
59992 /* Here are the sizes of the init and core sections */
59993- unsigned int init_size, core_size;
59994+ unsigned int init_size_rw, core_size_rw;
59995
59996 /* The size of the executable code in each section. */
59997- unsigned int init_text_size, core_text_size;
59998-
59999- /* Size of RO sections of the module (text+rodata) */
60000- unsigned int init_ro_size, core_ro_size;
60001+ unsigned int init_size_rx, core_size_rx;
60002
60003 /* Arch-specific module values */
60004 struct mod_arch_specific arch;
60005@@ -329,6 +327,10 @@ struct module
60006 #ifdef CONFIG_EVENT_TRACING
60007 struct ftrace_event_call **trace_events;
60008 unsigned int num_trace_events;
60009+ struct file_operations trace_id;
60010+ struct file_operations trace_enable;
60011+ struct file_operations trace_format;
60012+ struct file_operations trace_filter;
60013 #endif
60014 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
60015 unsigned int num_ftrace_callsites;
60016@@ -379,16 +381,46 @@ bool is_module_address(unsigned long addr);
60017 bool is_module_percpu_address(unsigned long addr);
60018 bool is_module_text_address(unsigned long addr);
60019
60020+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
60021+{
60022+
60023+#ifdef CONFIG_PAX_KERNEXEC
60024+ if (ktla_ktva(addr) >= (unsigned long)start &&
60025+ ktla_ktva(addr) < (unsigned long)start + size)
60026+ return 1;
60027+#endif
60028+
60029+ return ((void *)addr >= start && (void *)addr < start + size);
60030+}
60031+
60032+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60033+{
60034+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60035+}
60036+
60037+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60038+{
60039+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60040+}
60041+
60042+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60043+{
60044+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60045+}
60046+
60047+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60048+{
60049+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60050+}
60051+
60052 static inline int within_module_core(unsigned long addr, struct module *mod)
60053 {
60054- return (unsigned long)mod->module_core <= addr &&
60055- addr < (unsigned long)mod->module_core + mod->core_size;
60056+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60057 }
60058
60059 static inline int within_module_init(unsigned long addr, struct module *mod)
60060 {
60061- return (unsigned long)mod->module_init <= addr &&
60062- addr < (unsigned long)mod->module_init + mod->init_size;
60063+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60064 }
60065
60066 /* Search for module by name: must hold module_mutex. */
60067diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
60068index b2be02e..6a9fdb1 100644
60069--- a/include/linux/moduleloader.h
60070+++ b/include/linux/moduleloader.h
60071@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
60072 sections. Returns NULL on failure. */
60073 void *module_alloc(unsigned long size);
60074
60075+#ifdef CONFIG_PAX_KERNEXEC
60076+void *module_alloc_exec(unsigned long size);
60077+#else
60078+#define module_alloc_exec(x) module_alloc(x)
60079+#endif
60080+
60081 /* Free memory returned from module_alloc. */
60082 void module_free(struct module *mod, void *module_region);
60083
60084+#ifdef CONFIG_PAX_KERNEXEC
60085+void module_free_exec(struct module *mod, void *module_region);
60086+#else
60087+#define module_free_exec(x, y) module_free((x), (y))
60088+#endif
60089+
60090 /* Apply the given relocation to the (simplified) ELF. Return -error
60091 or 0. */
60092 int apply_relocate(Elf_Shdr *sechdrs,
60093diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
60094index 7939f63..ec6df57 100644
60095--- a/include/linux/moduleparam.h
60096+++ b/include/linux/moduleparam.h
60097@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
60098 * @len is usually just sizeof(string).
60099 */
60100 #define module_param_string(name, string, len, perm) \
60101- static const struct kparam_string __param_string_##name \
60102+ static const struct kparam_string __param_string_##name __used \
60103 = { len, string }; \
60104 __module_param_call(MODULE_PARAM_PREFIX, name, \
60105 &param_ops_string, \
60106@@ -395,7 +395,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
60107 * module_param_named() for why this might be necessary.
60108 */
60109 #define module_param_array_named(name, array, type, nump, perm) \
60110- static const struct kparam_array __param_arr_##name \
60111+ static const struct kparam_array __param_arr_##name __used \
60112 = { .max = ARRAY_SIZE(array), .num = nump, \
60113 .ops = &param_ops_##type, \
60114 .elemsize = sizeof(array[0]), .elem = array }; \
60115diff --git a/include/linux/namei.h b/include/linux/namei.h
60116index ffc0213..2c1f2cb 100644
60117--- a/include/linux/namei.h
60118+++ b/include/linux/namei.h
60119@@ -24,7 +24,7 @@ struct nameidata {
60120 unsigned seq;
60121 int last_type;
60122 unsigned depth;
60123- char *saved_names[MAX_NESTED_LINKS + 1];
60124+ const char *saved_names[MAX_NESTED_LINKS + 1];
60125
60126 /* Intent data */
60127 union {
60128@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
60129 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60130 extern void unlock_rename(struct dentry *, struct dentry *);
60131
60132-static inline void nd_set_link(struct nameidata *nd, char *path)
60133+static inline void nd_set_link(struct nameidata *nd, const char *path)
60134 {
60135 nd->saved_names[nd->depth] = path;
60136 }
60137
60138-static inline char *nd_get_link(struct nameidata *nd)
60139+static inline const char *nd_get_link(const struct nameidata *nd)
60140 {
60141 return nd->saved_names[nd->depth];
60142 }
60143diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
60144index a82ad4d..90d15b7 100644
60145--- a/include/linux/netdevice.h
60146+++ b/include/linux/netdevice.h
60147@@ -949,6 +949,7 @@ struct net_device_ops {
60148 int (*ndo_set_features)(struct net_device *dev,
60149 u32 features);
60150 };
60151+typedef struct net_device_ops __no_const net_device_ops_no_const;
60152
60153 /*
60154 * The DEVICE structure.
60155@@ -1088,7 +1089,7 @@ struct net_device {
60156 int iflink;
60157
60158 struct net_device_stats stats;
60159- atomic_long_t rx_dropped; /* dropped packets by core network
60160+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
60161 * Do not use this in drivers.
60162 */
60163
60164diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
60165new file mode 100644
60166index 0000000..33f4af8
60167--- /dev/null
60168+++ b/include/linux/netfilter/xt_gradm.h
60169@@ -0,0 +1,9 @@
60170+#ifndef _LINUX_NETFILTER_XT_GRADM_H
60171+#define _LINUX_NETFILTER_XT_GRADM_H 1
60172+
60173+struct xt_gradm_mtinfo {
60174+ __u16 flags;
60175+ __u16 invflags;
60176+};
60177+
60178+#endif
60179diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
60180index c65a18a..0c05f3a 100644
60181--- a/include/linux/of_pdt.h
60182+++ b/include/linux/of_pdt.h
60183@@ -32,7 +32,7 @@ struct of_pdt_ops {
60184
60185 /* return 0 on success; fill in 'len' with number of bytes in path */
60186 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
60187-};
60188+} __no_const;
60189
60190 extern void *prom_early_alloc(unsigned long size);
60191
60192diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
60193index a4c5624..79d6d88 100644
60194--- a/include/linux/oprofile.h
60195+++ b/include/linux/oprofile.h
60196@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
60197 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60198 char const * name, ulong * val);
60199
60200-/** Create a file for read-only access to an atomic_t. */
60201+/** Create a file for read-only access to an atomic_unchecked_t. */
60202 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60203- char const * name, atomic_t * val);
60204+ char const * name, atomic_unchecked_t * val);
60205
60206 /** create a directory */
60207 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60208diff --git a/include/linux/padata.h b/include/linux/padata.h
60209index 4633b2f..988bc08 100644
60210--- a/include/linux/padata.h
60211+++ b/include/linux/padata.h
60212@@ -129,7 +129,7 @@ struct parallel_data {
60213 struct padata_instance *pinst;
60214 struct padata_parallel_queue __percpu *pqueue;
60215 struct padata_serial_queue __percpu *squeue;
60216- atomic_t seq_nr;
60217+ atomic_unchecked_t seq_nr;
60218 atomic_t reorder_objects;
60219 atomic_t refcnt;
60220 unsigned int max_seq_nr;
60221diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
60222index b1f8912..c955bff 100644
60223--- a/include/linux/perf_event.h
60224+++ b/include/linux/perf_event.h
60225@@ -748,8 +748,8 @@ struct perf_event {
60226
60227 enum perf_event_active_state state;
60228 unsigned int attach_state;
60229- local64_t count;
60230- atomic64_t child_count;
60231+ local64_t count; /* PaX: fix it one day */
60232+ atomic64_unchecked_t child_count;
60233
60234 /*
60235 * These are the total time in nanoseconds that the event
60236@@ -800,8 +800,8 @@ struct perf_event {
60237 * These accumulate total time (in nanoseconds) that children
60238 * events have been enabled and running, respectively.
60239 */
60240- atomic64_t child_total_time_enabled;
60241- atomic64_t child_total_time_running;
60242+ atomic64_unchecked_t child_total_time_enabled;
60243+ atomic64_unchecked_t child_total_time_running;
60244
60245 /*
60246 * Protect attach/detach and child_list:
60247diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
60248index 77257c9..51d473a 100644
60249--- a/include/linux/pipe_fs_i.h
60250+++ b/include/linux/pipe_fs_i.h
60251@@ -46,9 +46,9 @@ struct pipe_buffer {
60252 struct pipe_inode_info {
60253 wait_queue_head_t wait;
60254 unsigned int nrbufs, curbuf, buffers;
60255- unsigned int readers;
60256- unsigned int writers;
60257- unsigned int waiting_writers;
60258+ atomic_t readers;
60259+ atomic_t writers;
60260+ atomic_t waiting_writers;
60261 unsigned int r_counter;
60262 unsigned int w_counter;
60263 struct page *tmp_page;
60264diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
60265index d3085e7..fd01052 100644
60266--- a/include/linux/pm_runtime.h
60267+++ b/include/linux/pm_runtime.h
60268@@ -95,7 +95,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
60269
60270 static inline void pm_runtime_mark_last_busy(struct device *dev)
60271 {
60272- ACCESS_ONCE(dev->power.last_busy) = jiffies;
60273+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
60274 }
60275
60276 #else /* !CONFIG_PM_RUNTIME */
60277diff --git a/include/linux/poison.h b/include/linux/poison.h
60278index 79159de..f1233a9 100644
60279--- a/include/linux/poison.h
60280+++ b/include/linux/poison.h
60281@@ -19,8 +19,8 @@
60282 * under normal circumstances, used to verify that nobody uses
60283 * non-initialized list entries.
60284 */
60285-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60286-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60287+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60288+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60289
60290 /********** include/linux/timer.h **********/
60291 /*
60292diff --git a/include/linux/preempt.h b/include/linux/preempt.h
60293index 58969b2..ead129b 100644
60294--- a/include/linux/preempt.h
60295+++ b/include/linux/preempt.h
60296@@ -123,7 +123,7 @@ struct preempt_ops {
60297 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60298 void (*sched_out)(struct preempt_notifier *notifier,
60299 struct task_struct *next);
60300-};
60301+} __no_const;
60302
60303 /**
60304 * preempt_notifier - key for installing preemption notifiers
60305diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
60306index 643b96c..ef55a9c 100644
60307--- a/include/linux/proc_fs.h
60308+++ b/include/linux/proc_fs.h
60309@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
60310 return proc_create_data(name, mode, parent, proc_fops, NULL);
60311 }
60312
60313+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60314+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60315+{
60316+#ifdef CONFIG_GRKERNSEC_PROC_USER
60317+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60318+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60319+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60320+#else
60321+ return proc_create_data(name, mode, parent, proc_fops, NULL);
60322+#endif
60323+}
60324+
60325+
60326 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60327 mode_t mode, struct proc_dir_entry *base,
60328 read_proc_t *read_proc, void * data)
60329@@ -258,7 +271,7 @@ union proc_op {
60330 int (*proc_show)(struct seq_file *m,
60331 struct pid_namespace *ns, struct pid *pid,
60332 struct task_struct *task);
60333-};
60334+} __no_const;
60335
60336 struct ctl_table_header;
60337 struct ctl_table;
60338diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
60339index 800f113..e9ee2e3 100644
60340--- a/include/linux/ptrace.h
60341+++ b/include/linux/ptrace.h
60342@@ -129,10 +129,12 @@ extern void __ptrace_unlink(struct task_struct *child);
60343 extern void exit_ptrace(struct task_struct *tracer);
60344 #define PTRACE_MODE_READ 1
60345 #define PTRACE_MODE_ATTACH 2
60346-/* Returns 0 on success, -errno on denial. */
60347-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60348 /* Returns true on success, false on denial. */
60349 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60350+/* Returns true on success, false on denial. */
60351+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60352+/* Returns true on success, false on denial. */
60353+extern bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode);
60354
60355 static inline int ptrace_reparented(struct task_struct *child)
60356 {
60357diff --git a/include/linux/random.h b/include/linux/random.h
60358index 8f74538..02a1012 100644
60359--- a/include/linux/random.h
60360+++ b/include/linux/random.h
60361@@ -69,12 +69,17 @@ void srandom32(u32 seed);
60362
60363 u32 prandom32(struct rnd_state *);
60364
60365+static inline unsigned long pax_get_random_long(void)
60366+{
60367+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60368+}
60369+
60370 /*
60371 * Handle minimum values for seeds
60372 */
60373 static inline u32 __seed(u32 x, u32 m)
60374 {
60375- return (x < m) ? x + m : x;
60376+ return (x <= m) ? x + m + 1 : x;
60377 }
60378
60379 /**
60380diff --git a/include/linux/reboot.h b/include/linux/reboot.h
60381index e0879a7..a12f962 100644
60382--- a/include/linux/reboot.h
60383+++ b/include/linux/reboot.h
60384@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
60385 * Architecture-specific implementations of sys_reboot commands.
60386 */
60387
60388-extern void machine_restart(char *cmd);
60389-extern void machine_halt(void);
60390-extern void machine_power_off(void);
60391+extern void machine_restart(char *cmd) __noreturn;
60392+extern void machine_halt(void) __noreturn;
60393+extern void machine_power_off(void) __noreturn;
60394
60395 extern void machine_shutdown(void);
60396 struct pt_regs;
60397@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
60398 */
60399
60400 extern void kernel_restart_prepare(char *cmd);
60401-extern void kernel_restart(char *cmd);
60402-extern void kernel_halt(void);
60403-extern void kernel_power_off(void);
60404+extern void kernel_restart(char *cmd) __noreturn;
60405+extern void kernel_halt(void) __noreturn;
60406+extern void kernel_power_off(void) __noreturn;
60407
60408 extern int C_A_D; /* for sysctl */
60409 void ctrl_alt_del(void);
60410@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
60411 * Emergency restart, callable from an interrupt handler.
60412 */
60413
60414-extern void emergency_restart(void);
60415+extern void emergency_restart(void) __noreturn;
60416 #include <asm/emergency-restart.h>
60417
60418 #endif
60419diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
60420index 96d465f..b084e05 100644
60421--- a/include/linux/reiserfs_fs.h
60422+++ b/include/linux/reiserfs_fs.h
60423@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
60424 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60425
60426 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60427-#define get_generation(s) atomic_read (&fs_generation(s))
60428+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60429 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60430 #define __fs_changed(gen,s) (gen != get_generation (s))
60431 #define fs_changed(gen,s) \
60432diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
60433index 52c83b6..18ed7eb 100644
60434--- a/include/linux/reiserfs_fs_sb.h
60435+++ b/include/linux/reiserfs_fs_sb.h
60436@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
60437 /* Comment? -Hans */
60438 wait_queue_head_t s_wait;
60439 /* To be obsoleted soon by per buffer seals.. -Hans */
60440- atomic_t s_generation_counter; // increased by one every time the
60441+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60442 // tree gets re-balanced
60443 unsigned long s_properties; /* File system properties. Currently holds
60444 on-disk FS format */
60445diff --git a/include/linux/relay.h b/include/linux/relay.h
60446index 14a86bc..17d0700 100644
60447--- a/include/linux/relay.h
60448+++ b/include/linux/relay.h
60449@@ -159,7 +159,7 @@ struct rchan_callbacks
60450 * The callback should return 0 if successful, negative if not.
60451 */
60452 int (*remove_buf_file)(struct dentry *dentry);
60453-};
60454+} __no_const;
60455
60456 /*
60457 * CONFIG_RELAY kernel API, kernel/relay.c
60458diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
60459index c6c6084..5bf1212 100644
60460--- a/include/linux/rfkill.h
60461+++ b/include/linux/rfkill.h
60462@@ -147,6 +147,7 @@ struct rfkill_ops {
60463 void (*query)(struct rfkill *rfkill, void *data);
60464 int (*set_block)(void *data, bool blocked);
60465 };
60466+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
60467
60468 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
60469 /**
60470diff --git a/include/linux/rio.h b/include/linux/rio.h
60471index 4d50611..c6858a2 100644
60472--- a/include/linux/rio.h
60473+++ b/include/linux/rio.h
60474@@ -315,7 +315,7 @@ struct rio_ops {
60475 int mbox, void *buffer, size_t len);
60476 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
60477 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
60478-};
60479+} __no_const;
60480
60481 #define RIO_RESOURCE_MEM 0x00000100
60482 #define RIO_RESOURCE_DOORBELL 0x00000200
60483diff --git a/include/linux/rmap.h b/include/linux/rmap.h
60484index 2148b12..519b820 100644
60485--- a/include/linux/rmap.h
60486+++ b/include/linux/rmap.h
60487@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
60488 void anon_vma_init(void); /* create anon_vma_cachep */
60489 int anon_vma_prepare(struct vm_area_struct *);
60490 void unlink_anon_vmas(struct vm_area_struct *);
60491-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
60492-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
60493+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
60494+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
60495 void __anon_vma_link(struct vm_area_struct *);
60496
60497 static inline void anon_vma_merge(struct vm_area_struct *vma,
60498diff --git a/include/linux/sched.h b/include/linux/sched.h
60499index 1c4f3e9..c5b241a 100644
60500--- a/include/linux/sched.h
60501+++ b/include/linux/sched.h
60502@@ -101,6 +101,7 @@ struct bio_list;
60503 struct fs_struct;
60504 struct perf_event_context;
60505 struct blk_plug;
60506+struct linux_binprm;
60507
60508 /*
60509 * List of flags we want to share for kernel threads,
60510@@ -380,10 +381,13 @@ struct user_namespace;
60511 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60512
60513 extern int sysctl_max_map_count;
60514+extern unsigned long sysctl_heap_stack_gap;
60515
60516 #include <linux/aio.h>
60517
60518 #ifdef CONFIG_MMU
60519+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60520+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60521 extern void arch_pick_mmap_layout(struct mm_struct *mm);
60522 extern unsigned long
60523 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60524@@ -629,6 +633,17 @@ struct signal_struct {
60525 #ifdef CONFIG_TASKSTATS
60526 struct taskstats *stats;
60527 #endif
60528+
60529+#ifdef CONFIG_GRKERNSEC
60530+ u32 curr_ip;
60531+ u32 saved_ip;
60532+ u32 gr_saddr;
60533+ u32 gr_daddr;
60534+ u16 gr_sport;
60535+ u16 gr_dport;
60536+ u8 used_accept:1;
60537+#endif
60538+
60539 #ifdef CONFIG_AUDIT
60540 unsigned audit_tty;
60541 struct tty_audit_buf *tty_audit_buf;
60542@@ -710,6 +725,11 @@ struct user_struct {
60543 struct key *session_keyring; /* UID's default session keyring */
60544 #endif
60545
60546+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60547+ unsigned int banned;
60548+ unsigned long ban_expires;
60549+#endif
60550+
60551 /* Hash table maintenance information */
60552 struct hlist_node uidhash_node;
60553 uid_t uid;
60554@@ -1337,8 +1357,8 @@ struct task_struct {
60555 struct list_head thread_group;
60556
60557 struct completion *vfork_done; /* for vfork() */
60558- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60559- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60560+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60561+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60562
60563 cputime_t utime, stime, utimescaled, stimescaled;
60564 cputime_t gtime;
60565@@ -1354,13 +1374,6 @@ struct task_struct {
60566 struct task_cputime cputime_expires;
60567 struct list_head cpu_timers[3];
60568
60569-/* process credentials */
60570- const struct cred __rcu *real_cred; /* objective and real subjective task
60571- * credentials (COW) */
60572- const struct cred __rcu *cred; /* effective (overridable) subjective task
60573- * credentials (COW) */
60574- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60575-
60576 char comm[TASK_COMM_LEN]; /* executable name excluding path
60577 - access with [gs]et_task_comm (which lock
60578 it with task_lock())
60579@@ -1377,8 +1390,16 @@ struct task_struct {
60580 #endif
60581 /* CPU-specific state of this task */
60582 struct thread_struct thread;
60583+/* thread_info moved to task_struct */
60584+#ifdef CONFIG_X86
60585+ struct thread_info tinfo;
60586+#endif
60587 /* filesystem information */
60588 struct fs_struct *fs;
60589+
60590+ const struct cred __rcu *cred; /* effective (overridable) subjective task
60591+ * credentials (COW) */
60592+
60593 /* open file information */
60594 struct files_struct *files;
60595 /* namespaces */
60596@@ -1425,6 +1446,11 @@ struct task_struct {
60597 struct rt_mutex_waiter *pi_blocked_on;
60598 #endif
60599
60600+/* process credentials */
60601+ const struct cred __rcu *real_cred; /* objective and real subjective task
60602+ * credentials (COW) */
60603+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60604+
60605 #ifdef CONFIG_DEBUG_MUTEXES
60606 /* mutex deadlock detection */
60607 struct mutex_waiter *blocked_on;
60608@@ -1540,6 +1566,24 @@ struct task_struct {
60609 unsigned long default_timer_slack_ns;
60610
60611 struct list_head *scm_work_list;
60612+
60613+#ifdef CONFIG_GRKERNSEC
60614+ /* grsecurity */
60615+#ifdef CONFIG_GRKERNSEC_SETXID
60616+ const struct cred *delayed_cred;
60617+#endif
60618+ struct dentry *gr_chroot_dentry;
60619+ struct acl_subject_label *acl;
60620+ struct acl_role_label *role;
60621+ struct file *exec_file;
60622+ u16 acl_role_id;
60623+ /* is this the task that authenticated to the special role */
60624+ u8 acl_sp_role;
60625+ u8 is_writable;
60626+ u8 brute;
60627+ u8 gr_is_chrooted;
60628+#endif
60629+
60630 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60631 /* Index of current stored address in ret_stack */
60632 int curr_ret_stack;
60633@@ -1574,6 +1618,51 @@ struct task_struct {
60634 #endif
60635 };
60636
60637+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60638+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60639+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60640+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60641+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60642+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60643+
60644+#ifdef CONFIG_PAX_SOFTMODE
60645+extern int pax_softmode;
60646+#endif
60647+
60648+extern int pax_check_flags(unsigned long *);
60649+
60650+/* if tsk != current then task_lock must be held on it */
60651+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60652+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60653+{
60654+ if (likely(tsk->mm))
60655+ return tsk->mm->pax_flags;
60656+ else
60657+ return 0UL;
60658+}
60659+
60660+/* if tsk != current then task_lock must be held on it */
60661+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60662+{
60663+ if (likely(tsk->mm)) {
60664+ tsk->mm->pax_flags = flags;
60665+ return 0;
60666+ }
60667+ return -EINVAL;
60668+}
60669+#endif
60670+
60671+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60672+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60673+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60674+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60675+#endif
60676+
60677+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60678+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
60679+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60680+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60681+
60682 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60683 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
60684
60685@@ -2081,7 +2170,9 @@ void yield(void);
60686 extern struct exec_domain default_exec_domain;
60687
60688 union thread_union {
60689+#ifndef CONFIG_X86
60690 struct thread_info thread_info;
60691+#endif
60692 unsigned long stack[THREAD_SIZE/sizeof(long)];
60693 };
60694
60695@@ -2114,6 +2205,7 @@ extern struct pid_namespace init_pid_ns;
60696 */
60697
60698 extern struct task_struct *find_task_by_vpid(pid_t nr);
60699+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60700 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60701 struct pid_namespace *ns);
60702
60703@@ -2251,7 +2343,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
60704 extern void exit_itimers(struct signal_struct *);
60705 extern void flush_itimer_signals(void);
60706
60707-extern NORET_TYPE void do_group_exit(int);
60708+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60709
60710 extern void daemonize(const char *, ...);
60711 extern int allow_signal(int);
60712@@ -2416,13 +2508,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
60713
60714 #endif
60715
60716-static inline int object_is_on_stack(void *obj)
60717+static inline int object_starts_on_stack(void *obj)
60718 {
60719- void *stack = task_stack_page(current);
60720+ const void *stack = task_stack_page(current);
60721
60722 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60723 }
60724
60725+#ifdef CONFIG_PAX_USERCOPY
60726+extern int object_is_on_stack(const void *obj, unsigned long len);
60727+#endif
60728+
60729 extern void thread_info_cache_init(void);
60730
60731 #ifdef CONFIG_DEBUG_STACK_USAGE
60732diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
60733index 899fbb4..1cb4138 100644
60734--- a/include/linux/screen_info.h
60735+++ b/include/linux/screen_info.h
60736@@ -43,7 +43,8 @@ struct screen_info {
60737 __u16 pages; /* 0x32 */
60738 __u16 vesa_attributes; /* 0x34 */
60739 __u32 capabilities; /* 0x36 */
60740- __u8 _reserved[6]; /* 0x3a */
60741+ __u16 vesapm_size; /* 0x3a */
60742+ __u8 _reserved[4]; /* 0x3c */
60743 } __attribute__((packed));
60744
60745 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60746diff --git a/include/linux/security.h b/include/linux/security.h
60747index e8c619d..e0cbd1c 100644
60748--- a/include/linux/security.h
60749+++ b/include/linux/security.h
60750@@ -37,6 +37,7 @@
60751 #include <linux/xfrm.h>
60752 #include <linux/slab.h>
60753 #include <linux/xattr.h>
60754+#include <linux/grsecurity.h>
60755 #include <net/flow.h>
60756
60757 /* Maximum number of letters for an LSM name string */
60758diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
60759index 0b69a46..e9e5538 100644
60760--- a/include/linux/seq_file.h
60761+++ b/include/linux/seq_file.h
60762@@ -33,6 +33,7 @@ struct seq_operations {
60763 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
60764 int (*show) (struct seq_file *m, void *v);
60765 };
60766+typedef struct seq_operations __no_const seq_operations_no_const;
60767
60768 #define SEQ_SKIP 1
60769
60770diff --git a/include/linux/shm.h b/include/linux/shm.h
60771index 92808b8..c28cac4 100644
60772--- a/include/linux/shm.h
60773+++ b/include/linux/shm.h
60774@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
60775
60776 /* The task created the shm object. NULL if the task is dead. */
60777 struct task_struct *shm_creator;
60778+#ifdef CONFIG_GRKERNSEC
60779+ time_t shm_createtime;
60780+ pid_t shm_lapid;
60781+#endif
60782 };
60783
60784 /* shm_mode upper byte flags */
60785diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
60786index fe86488..1563c1c 100644
60787--- a/include/linux/skbuff.h
60788+++ b/include/linux/skbuff.h
60789@@ -642,7 +642,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
60790 */
60791 static inline int skb_queue_empty(const struct sk_buff_head *list)
60792 {
60793- return list->next == (struct sk_buff *)list;
60794+ return list->next == (const struct sk_buff *)list;
60795 }
60796
60797 /**
60798@@ -655,7 +655,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
60799 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60800 const struct sk_buff *skb)
60801 {
60802- return skb->next == (struct sk_buff *)list;
60803+ return skb->next == (const struct sk_buff *)list;
60804 }
60805
60806 /**
60807@@ -668,7 +668,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
60808 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
60809 const struct sk_buff *skb)
60810 {
60811- return skb->prev == (struct sk_buff *)list;
60812+ return skb->prev == (const struct sk_buff *)list;
60813 }
60814
60815 /**
60816@@ -1523,7 +1523,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
60817 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
60818 */
60819 #ifndef NET_SKB_PAD
60820-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
60821+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
60822 #endif
60823
60824 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
60825diff --git a/include/linux/slab.h b/include/linux/slab.h
60826index 573c809..e84c132 100644
60827--- a/include/linux/slab.h
60828+++ b/include/linux/slab.h
60829@@ -11,12 +11,20 @@
60830
60831 #include <linux/gfp.h>
60832 #include <linux/types.h>
60833+#include <linux/err.h>
60834
60835 /*
60836 * Flags to pass to kmem_cache_create().
60837 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
60838 */
60839 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
60840+
60841+#ifdef CONFIG_PAX_USERCOPY
60842+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
60843+#else
60844+#define SLAB_USERCOPY 0x00000000UL
60845+#endif
60846+
60847 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
60848 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
60849 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
60850@@ -87,10 +95,13 @@
60851 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
60852 * Both make kfree a no-op.
60853 */
60854-#define ZERO_SIZE_PTR ((void *)16)
60855+#define ZERO_SIZE_PTR \
60856+({ \
60857+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
60858+ (void *)(-MAX_ERRNO-1L); \
60859+})
60860
60861-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
60862- (unsigned long)ZERO_SIZE_PTR)
60863+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
60864
60865 /*
60866 * struct kmem_cache related prototypes
60867@@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
60868 void kfree(const void *);
60869 void kzfree(const void *);
60870 size_t ksize(const void *);
60871+void check_object_size(const void *ptr, unsigned long n, bool to);
60872
60873 /*
60874 * Allocator specific definitions. These are mainly used to establish optimized
60875@@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
60876
60877 void __init kmem_cache_init_late(void);
60878
60879+#define kmalloc(x, y) \
60880+({ \
60881+ void *___retval; \
60882+ intoverflow_t ___x = (intoverflow_t)x; \
60883+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
60884+ ___retval = NULL; \
60885+ else \
60886+ ___retval = kmalloc((size_t)___x, (y)); \
60887+ ___retval; \
60888+})
60889+
60890+#define kmalloc_node(x, y, z) \
60891+({ \
60892+ void *___retval; \
60893+ intoverflow_t ___x = (intoverflow_t)x; \
60894+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
60895+ ___retval = NULL; \
60896+ else \
60897+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
60898+ ___retval; \
60899+})
60900+
60901+#define kzalloc(x, y) \
60902+({ \
60903+ void *___retval; \
60904+ intoverflow_t ___x = (intoverflow_t)x; \
60905+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
60906+ ___retval = NULL; \
60907+ else \
60908+ ___retval = kzalloc((size_t)___x, (y)); \
60909+ ___retval; \
60910+})
60911+
60912+#define __krealloc(x, y, z) \
60913+({ \
60914+ void *___retval; \
60915+ intoverflow_t ___y = (intoverflow_t)y; \
60916+ if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
60917+ ___retval = NULL; \
60918+ else \
60919+ ___retval = __krealloc((x), (size_t)___y, (z)); \
60920+ ___retval; \
60921+})
60922+
60923+#define krealloc(x, y, z) \
60924+({ \
60925+ void *___retval; \
60926+ intoverflow_t ___y = (intoverflow_t)y; \
60927+ if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
60928+ ___retval = NULL; \
60929+ else \
60930+ ___retval = krealloc((x), (size_t)___y, (z)); \
60931+ ___retval; \
60932+})
60933+
60934 #endif /* _LINUX_SLAB_H */
60935diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
60936index d00e0ba..1b3bf7b 100644
60937--- a/include/linux/slab_def.h
60938+++ b/include/linux/slab_def.h
60939@@ -68,10 +68,10 @@ struct kmem_cache {
60940 unsigned long node_allocs;
60941 unsigned long node_frees;
60942 unsigned long node_overflow;
60943- atomic_t allochit;
60944- atomic_t allocmiss;
60945- atomic_t freehit;
60946- atomic_t freemiss;
60947+ atomic_unchecked_t allochit;
60948+ atomic_unchecked_t allocmiss;
60949+ atomic_unchecked_t freehit;
60950+ atomic_unchecked_t freemiss;
60951
60952 /*
60953 * If debugging is enabled, then the allocator can add additional
60954diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
60955index a32bcfd..53b71f4 100644
60956--- a/include/linux/slub_def.h
60957+++ b/include/linux/slub_def.h
60958@@ -89,7 +89,7 @@ struct kmem_cache {
60959 struct kmem_cache_order_objects max;
60960 struct kmem_cache_order_objects min;
60961 gfp_t allocflags; /* gfp flags to use on each alloc */
60962- int refcount; /* Refcount for slab cache destroy */
60963+ atomic_t refcount; /* Refcount for slab cache destroy */
60964 void (*ctor)(void *);
60965 int inuse; /* Offset to metadata */
60966 int align; /* Alignment */
60967@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
60968 }
60969
60970 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
60971-void *__kmalloc(size_t size, gfp_t flags);
60972+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
60973
60974 static __always_inline void *
60975 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
60976diff --git a/include/linux/sonet.h b/include/linux/sonet.h
60977index de8832d..0147b46 100644
60978--- a/include/linux/sonet.h
60979+++ b/include/linux/sonet.h
60980@@ -61,7 +61,7 @@ struct sonet_stats {
60981 #include <linux/atomic.h>
60982
60983 struct k_sonet_stats {
60984-#define __HANDLE_ITEM(i) atomic_t i
60985+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60986 __SONET_ITEMS
60987 #undef __HANDLE_ITEM
60988 };
60989diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
60990index 3d8f9c4..69f1c0a 100644
60991--- a/include/linux/sunrpc/clnt.h
60992+++ b/include/linux/sunrpc/clnt.h
60993@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
60994 {
60995 switch (sap->sa_family) {
60996 case AF_INET:
60997- return ntohs(((struct sockaddr_in *)sap)->sin_port);
60998+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
60999 case AF_INET6:
61000- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
61001+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
61002 }
61003 return 0;
61004 }
61005@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
61006 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
61007 const struct sockaddr *src)
61008 {
61009- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
61010+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
61011 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
61012
61013 dsin->sin_family = ssin->sin_family;
61014@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
61015 if (sa->sa_family != AF_INET6)
61016 return 0;
61017
61018- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
61019+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
61020 }
61021
61022 #endif /* __KERNEL__ */
61023diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
61024index e775689..9e206d9 100644
61025--- a/include/linux/sunrpc/sched.h
61026+++ b/include/linux/sunrpc/sched.h
61027@@ -105,6 +105,7 @@ struct rpc_call_ops {
61028 void (*rpc_call_done)(struct rpc_task *, void *);
61029 void (*rpc_release)(void *);
61030 };
61031+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
61032
61033 struct rpc_task_setup {
61034 struct rpc_task *task;
61035diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
61036index c14fe86..393245e 100644
61037--- a/include/linux/sunrpc/svc_rdma.h
61038+++ b/include/linux/sunrpc/svc_rdma.h
61039@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61040 extern unsigned int svcrdma_max_requests;
61041 extern unsigned int svcrdma_max_req_size;
61042
61043-extern atomic_t rdma_stat_recv;
61044-extern atomic_t rdma_stat_read;
61045-extern atomic_t rdma_stat_write;
61046-extern atomic_t rdma_stat_sq_starve;
61047-extern atomic_t rdma_stat_rq_starve;
61048-extern atomic_t rdma_stat_rq_poll;
61049-extern atomic_t rdma_stat_rq_prod;
61050-extern atomic_t rdma_stat_sq_poll;
61051-extern atomic_t rdma_stat_sq_prod;
61052+extern atomic_unchecked_t rdma_stat_recv;
61053+extern atomic_unchecked_t rdma_stat_read;
61054+extern atomic_unchecked_t rdma_stat_write;
61055+extern atomic_unchecked_t rdma_stat_sq_starve;
61056+extern atomic_unchecked_t rdma_stat_rq_starve;
61057+extern atomic_unchecked_t rdma_stat_rq_poll;
61058+extern atomic_unchecked_t rdma_stat_rq_prod;
61059+extern atomic_unchecked_t rdma_stat_sq_poll;
61060+extern atomic_unchecked_t rdma_stat_sq_prod;
61061
61062 #define RPCRDMA_VERSION 1
61063
61064diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
61065index 703cfa3..0b8ca72ac 100644
61066--- a/include/linux/sysctl.h
61067+++ b/include/linux/sysctl.h
61068@@ -155,7 +155,11 @@ enum
61069 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61070 };
61071
61072-
61073+#ifdef CONFIG_PAX_SOFTMODE
61074+enum {
61075+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61076+};
61077+#endif
61078
61079 /* CTL_VM names: */
61080 enum
61081@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
61082
61083 extern int proc_dostring(struct ctl_table *, int,
61084 void __user *, size_t *, loff_t *);
61085+extern int proc_dostring_modpriv(struct ctl_table *, int,
61086+ void __user *, size_t *, loff_t *);
61087 extern int proc_dointvec(struct ctl_table *, int,
61088 void __user *, size_t *, loff_t *);
61089 extern int proc_dointvec_minmax(struct ctl_table *, int,
61090diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
61091index ff7dc08..893e1bd 100644
61092--- a/include/linux/tty_ldisc.h
61093+++ b/include/linux/tty_ldisc.h
61094@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
61095
61096 struct module *owner;
61097
61098- int refcount;
61099+ atomic_t refcount;
61100 };
61101
61102 struct tty_ldisc {
61103diff --git a/include/linux/types.h b/include/linux/types.h
61104index 57a9723..dbe234a 100644
61105--- a/include/linux/types.h
61106+++ b/include/linux/types.h
61107@@ -213,10 +213,26 @@ typedef struct {
61108 int counter;
61109 } atomic_t;
61110
61111+#ifdef CONFIG_PAX_REFCOUNT
61112+typedef struct {
61113+ int counter;
61114+} atomic_unchecked_t;
61115+#else
61116+typedef atomic_t atomic_unchecked_t;
61117+#endif
61118+
61119 #ifdef CONFIG_64BIT
61120 typedef struct {
61121 long counter;
61122 } atomic64_t;
61123+
61124+#ifdef CONFIG_PAX_REFCOUNT
61125+typedef struct {
61126+ long counter;
61127+} atomic64_unchecked_t;
61128+#else
61129+typedef atomic64_t atomic64_unchecked_t;
61130+#endif
61131 #endif
61132
61133 struct list_head {
61134diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
61135index 5ca0951..ab496a5 100644
61136--- a/include/linux/uaccess.h
61137+++ b/include/linux/uaccess.h
61138@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
61139 long ret; \
61140 mm_segment_t old_fs = get_fs(); \
61141 \
61142- set_fs(KERNEL_DS); \
61143 pagefault_disable(); \
61144- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61145- pagefault_enable(); \
61146+ set_fs(KERNEL_DS); \
61147+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
61148 set_fs(old_fs); \
61149+ pagefault_enable(); \
61150 ret; \
61151 })
61152
61153diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
61154index 99c1b4d..bb94261 100644
61155--- a/include/linux/unaligned/access_ok.h
61156+++ b/include/linux/unaligned/access_ok.h
61157@@ -6,32 +6,32 @@
61158
61159 static inline u16 get_unaligned_le16(const void *p)
61160 {
61161- return le16_to_cpup((__le16 *)p);
61162+ return le16_to_cpup((const __le16 *)p);
61163 }
61164
61165 static inline u32 get_unaligned_le32(const void *p)
61166 {
61167- return le32_to_cpup((__le32 *)p);
61168+ return le32_to_cpup((const __le32 *)p);
61169 }
61170
61171 static inline u64 get_unaligned_le64(const void *p)
61172 {
61173- return le64_to_cpup((__le64 *)p);
61174+ return le64_to_cpup((const __le64 *)p);
61175 }
61176
61177 static inline u16 get_unaligned_be16(const void *p)
61178 {
61179- return be16_to_cpup((__be16 *)p);
61180+ return be16_to_cpup((const __be16 *)p);
61181 }
61182
61183 static inline u32 get_unaligned_be32(const void *p)
61184 {
61185- return be32_to_cpup((__be32 *)p);
61186+ return be32_to_cpup((const __be32 *)p);
61187 }
61188
61189 static inline u64 get_unaligned_be64(const void *p)
61190 {
61191- return be64_to_cpup((__be64 *)p);
61192+ return be64_to_cpup((const __be64 *)p);
61193 }
61194
61195 static inline void put_unaligned_le16(u16 val, void *p)
61196diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
61197index e5a40c3..20ab0f6 100644
61198--- a/include/linux/usb/renesas_usbhs.h
61199+++ b/include/linux/usb/renesas_usbhs.h
61200@@ -39,7 +39,7 @@ enum {
61201 */
61202 struct renesas_usbhs_driver_callback {
61203 int (*notify_hotplug)(struct platform_device *pdev);
61204-};
61205+} __no_const;
61206
61207 /*
61208 * callback functions for platform
61209@@ -89,7 +89,7 @@ struct renesas_usbhs_platform_callback {
61210 * VBUS control is needed for Host
61211 */
61212 int (*set_vbus)(struct platform_device *pdev, int enable);
61213-};
61214+} __no_const;
61215
61216 /*
61217 * parameters for renesas usbhs
61218diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
61219index 6f8fbcf..8259001 100644
61220--- a/include/linux/vermagic.h
61221+++ b/include/linux/vermagic.h
61222@@ -25,9 +25,35 @@
61223 #define MODULE_ARCH_VERMAGIC ""
61224 #endif
61225
61226+#ifdef CONFIG_PAX_REFCOUNT
61227+#define MODULE_PAX_REFCOUNT "REFCOUNT "
61228+#else
61229+#define MODULE_PAX_REFCOUNT ""
61230+#endif
61231+
61232+#ifdef CONSTIFY_PLUGIN
61233+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
61234+#else
61235+#define MODULE_CONSTIFY_PLUGIN ""
61236+#endif
61237+
61238+#ifdef STACKLEAK_PLUGIN
61239+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
61240+#else
61241+#define MODULE_STACKLEAK_PLUGIN ""
61242+#endif
61243+
61244+#ifdef CONFIG_GRKERNSEC
61245+#define MODULE_GRSEC "GRSEC "
61246+#else
61247+#define MODULE_GRSEC ""
61248+#endif
61249+
61250 #define VERMAGIC_STRING \
61251 UTS_RELEASE " " \
61252 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
61253 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
61254- MODULE_ARCH_VERMAGIC
61255+ MODULE_ARCH_VERMAGIC \
61256+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
61257+ MODULE_GRSEC
61258
61259diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
61260index 4bde182..aec92c1 100644
61261--- a/include/linux/vmalloc.h
61262+++ b/include/linux/vmalloc.h
61263@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
61264 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61265 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61266 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
61267+
61268+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61269+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
61270+#endif
61271+
61272 /* bits [20..32] reserved for arch specific ioremap internals */
61273
61274 /*
61275@@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
61276 # endif
61277 #endif
61278
61279+#define vmalloc(x) \
61280+({ \
61281+ void *___retval; \
61282+ intoverflow_t ___x = (intoverflow_t)x; \
61283+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61284+ ___retval = NULL; \
61285+ else \
61286+ ___retval = vmalloc((unsigned long)___x); \
61287+ ___retval; \
61288+})
61289+
61290+#define vzalloc(x) \
61291+({ \
61292+ void *___retval; \
61293+ intoverflow_t ___x = (intoverflow_t)x; \
61294+ if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
61295+ ___retval = NULL; \
61296+ else \
61297+ ___retval = vzalloc((unsigned long)___x); \
61298+ ___retval; \
61299+})
61300+
61301+#define __vmalloc(x, y, z) \
61302+({ \
61303+ void *___retval; \
61304+ intoverflow_t ___x = (intoverflow_t)x; \
61305+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61306+ ___retval = NULL; \
61307+ else \
61308+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61309+ ___retval; \
61310+})
61311+
61312+#define vmalloc_user(x) \
61313+({ \
61314+ void *___retval; \
61315+ intoverflow_t ___x = (intoverflow_t)x; \
61316+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61317+ ___retval = NULL; \
61318+ else \
61319+ ___retval = vmalloc_user((unsigned long)___x); \
61320+ ___retval; \
61321+})
61322+
61323+#define vmalloc_exec(x) \
61324+({ \
61325+ void *___retval; \
61326+ intoverflow_t ___x = (intoverflow_t)x; \
61327+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61328+ ___retval = NULL; \
61329+ else \
61330+ ___retval = vmalloc_exec((unsigned long)___x); \
61331+ ___retval; \
61332+})
61333+
61334+#define vmalloc_node(x, y) \
61335+({ \
61336+ void *___retval; \
61337+ intoverflow_t ___x = (intoverflow_t)x; \
61338+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61339+ ___retval = NULL; \
61340+ else \
61341+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61342+ ___retval; \
61343+})
61344+
61345+#define vzalloc_node(x, y) \
61346+({ \
61347+ void *___retval; \
61348+ intoverflow_t ___x = (intoverflow_t)x; \
61349+ if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
61350+ ___retval = NULL; \
61351+ else \
61352+ ___retval = vzalloc_node((unsigned long)___x, (y));\
61353+ ___retval; \
61354+})
61355+
61356+#define vmalloc_32(x) \
61357+({ \
61358+ void *___retval; \
61359+ intoverflow_t ___x = (intoverflow_t)x; \
61360+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61361+ ___retval = NULL; \
61362+ else \
61363+ ___retval = vmalloc_32((unsigned long)___x); \
61364+ ___retval; \
61365+})
61366+
61367+#define vmalloc_32_user(x) \
61368+({ \
61369+void *___retval; \
61370+ intoverflow_t ___x = (intoverflow_t)x; \
61371+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61372+ ___retval = NULL; \
61373+ else \
61374+ ___retval = vmalloc_32_user((unsigned long)___x);\
61375+ ___retval; \
61376+})
61377+
61378 #endif /* _LINUX_VMALLOC_H */
61379diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
61380index 65efb92..137adbb 100644
61381--- a/include/linux/vmstat.h
61382+++ b/include/linux/vmstat.h
61383@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
61384 /*
61385 * Zone based page accounting with per cpu differentials.
61386 */
61387-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61388+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61389
61390 static inline void zone_page_state_add(long x, struct zone *zone,
61391 enum zone_stat_item item)
61392 {
61393- atomic_long_add(x, &zone->vm_stat[item]);
61394- atomic_long_add(x, &vm_stat[item]);
61395+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61396+ atomic_long_add_unchecked(x, &vm_stat[item]);
61397 }
61398
61399 static inline unsigned long global_page_state(enum zone_stat_item item)
61400 {
61401- long x = atomic_long_read(&vm_stat[item]);
61402+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61403 #ifdef CONFIG_SMP
61404 if (x < 0)
61405 x = 0;
61406@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
61407 static inline unsigned long zone_page_state(struct zone *zone,
61408 enum zone_stat_item item)
61409 {
61410- long x = atomic_long_read(&zone->vm_stat[item]);
61411+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61412 #ifdef CONFIG_SMP
61413 if (x < 0)
61414 x = 0;
61415@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
61416 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61417 enum zone_stat_item item)
61418 {
61419- long x = atomic_long_read(&zone->vm_stat[item]);
61420+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61421
61422 #ifdef CONFIG_SMP
61423 int cpu;
61424@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
61425
61426 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61427 {
61428- atomic_long_inc(&zone->vm_stat[item]);
61429- atomic_long_inc(&vm_stat[item]);
61430+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61431+ atomic_long_inc_unchecked(&vm_stat[item]);
61432 }
61433
61434 static inline void __inc_zone_page_state(struct page *page,
61435@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
61436
61437 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61438 {
61439- atomic_long_dec(&zone->vm_stat[item]);
61440- atomic_long_dec(&vm_stat[item]);
61441+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61442+ atomic_long_dec_unchecked(&vm_stat[item]);
61443 }
61444
61445 static inline void __dec_zone_page_state(struct page *page,
61446diff --git a/include/linux/xattr.h b/include/linux/xattr.h
61447index e5d1220..ef6e406 100644
61448--- a/include/linux/xattr.h
61449+++ b/include/linux/xattr.h
61450@@ -57,6 +57,11 @@
61451 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
61452 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
61453
61454+/* User namespace */
61455+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
61456+#define XATTR_PAX_FLAGS_SUFFIX "flags"
61457+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
61458+
61459 #ifdef __KERNEL__
61460
61461 #include <linux/types.h>
61462diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
61463index 4aeff96..b378cdc 100644
61464--- a/include/media/saa7146_vv.h
61465+++ b/include/media/saa7146_vv.h
61466@@ -163,7 +163,7 @@ struct saa7146_ext_vv
61467 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
61468
61469 /* the extension can override this */
61470- struct v4l2_ioctl_ops ops;
61471+ v4l2_ioctl_ops_no_const ops;
61472 /* pointer to the saa7146 core ops */
61473 const struct v4l2_ioctl_ops *core_ops;
61474
61475diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
61476index c7c40f1..4f01585 100644
61477--- a/include/media/v4l2-dev.h
61478+++ b/include/media/v4l2-dev.h
61479@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
61480
61481
61482 struct v4l2_file_operations {
61483- struct module *owner;
61484+ struct module * const owner;
61485 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61486 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61487 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61488@@ -68,6 +68,7 @@ struct v4l2_file_operations {
61489 int (*open) (struct file *);
61490 int (*release) (struct file *);
61491 };
61492+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
61493
61494 /*
61495 * Newer version of video_device, handled by videodev2.c
61496diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
61497index 4d1c74a..65e1221 100644
61498--- a/include/media/v4l2-ioctl.h
61499+++ b/include/media/v4l2-ioctl.h
61500@@ -274,7 +274,7 @@ struct v4l2_ioctl_ops {
61501 long (*vidioc_default) (struct file *file, void *fh,
61502 bool valid_prio, int cmd, void *arg);
61503 };
61504-
61505+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
61506
61507 /* v4l debugging and diagnostics */
61508
61509diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
61510index 8d55251..dfe5b0a 100644
61511--- a/include/net/caif/caif_hsi.h
61512+++ b/include/net/caif/caif_hsi.h
61513@@ -98,7 +98,7 @@ struct cfhsi_drv {
61514 void (*rx_done_cb) (struct cfhsi_drv *drv);
61515 void (*wake_up_cb) (struct cfhsi_drv *drv);
61516 void (*wake_down_cb) (struct cfhsi_drv *drv);
61517-};
61518+} __no_const;
61519
61520 /* Structure implemented by HSI device. */
61521 struct cfhsi_dev {
61522diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
61523index 9e5425b..8136ffc 100644
61524--- a/include/net/caif/cfctrl.h
61525+++ b/include/net/caif/cfctrl.h
61526@@ -52,7 +52,7 @@ struct cfctrl_rsp {
61527 void (*radioset_rsp)(void);
61528 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
61529 struct cflayer *client_layer);
61530-};
61531+} __no_const;
61532
61533 /* Link Setup Parameters for CAIF-Links. */
61534 struct cfctrl_link_param {
61535@@ -101,8 +101,8 @@ struct cfctrl_request_info {
61536 struct cfctrl {
61537 struct cfsrvl serv;
61538 struct cfctrl_rsp res;
61539- atomic_t req_seq_no;
61540- atomic_t rsp_seq_no;
61541+ atomic_unchecked_t req_seq_no;
61542+ atomic_unchecked_t rsp_seq_no;
61543 struct list_head list;
61544 /* Protects from simultaneous access to first_req list */
61545 spinlock_t info_list_lock;
61546diff --git a/include/net/flow.h b/include/net/flow.h
61547index 57f15a7..0de26c6 100644
61548--- a/include/net/flow.h
61549+++ b/include/net/flow.h
61550@@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
61551
61552 extern void flow_cache_flush(void);
61553 extern void flow_cache_flush_deferred(void);
61554-extern atomic_t flow_cache_genid;
61555+extern atomic_unchecked_t flow_cache_genid;
61556
61557 #endif
61558diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
61559index e9ff3fc..9d3e5c7 100644
61560--- a/include/net/inetpeer.h
61561+++ b/include/net/inetpeer.h
61562@@ -48,8 +48,8 @@ struct inet_peer {
61563 */
61564 union {
61565 struct {
61566- atomic_t rid; /* Frag reception counter */
61567- atomic_t ip_id_count; /* IP ID for the next packet */
61568+ atomic_unchecked_t rid; /* Frag reception counter */
61569+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
61570 __u32 tcp_ts;
61571 __u32 tcp_ts_stamp;
61572 };
61573@@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
61574 more++;
61575 inet_peer_refcheck(p);
61576 do {
61577- old = atomic_read(&p->ip_id_count);
61578+ old = atomic_read_unchecked(&p->ip_id_count);
61579 new = old + more;
61580 if (!new)
61581 new = 1;
61582- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
61583+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
61584 return new;
61585 }
61586
61587diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
61588index 10422ef..662570f 100644
61589--- a/include/net/ip_fib.h
61590+++ b/include/net/ip_fib.h
61591@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
61592
61593 #define FIB_RES_SADDR(net, res) \
61594 ((FIB_RES_NH(res).nh_saddr_genid == \
61595- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
61596+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
61597 FIB_RES_NH(res).nh_saddr : \
61598 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
61599 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
61600diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
61601index e5a7b9a..f4fc44b 100644
61602--- a/include/net/ip_vs.h
61603+++ b/include/net/ip_vs.h
61604@@ -509,7 +509,7 @@ struct ip_vs_conn {
61605 struct ip_vs_conn *control; /* Master control connection */
61606 atomic_t n_control; /* Number of controlled ones */
61607 struct ip_vs_dest *dest; /* real server */
61608- atomic_t in_pkts; /* incoming packet counter */
61609+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61610
61611 /* packet transmitter for different forwarding methods. If it
61612 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61613@@ -647,7 +647,7 @@ struct ip_vs_dest {
61614 __be16 port; /* port number of the server */
61615 union nf_inet_addr addr; /* IP address of the server */
61616 volatile unsigned flags; /* dest status flags */
61617- atomic_t conn_flags; /* flags to copy to conn */
61618+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61619 atomic_t weight; /* server weight */
61620
61621 atomic_t refcnt; /* reference counter */
61622diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
61623index 69b610a..fe3962c 100644
61624--- a/include/net/irda/ircomm_core.h
61625+++ b/include/net/irda/ircomm_core.h
61626@@ -51,7 +51,7 @@ typedef struct {
61627 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61628 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61629 struct ircomm_info *);
61630-} call_t;
61631+} __no_const call_t;
61632
61633 struct ircomm_cb {
61634 irda_queue_t queue;
61635diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
61636index 59ba38bc..d515662 100644
61637--- a/include/net/irda/ircomm_tty.h
61638+++ b/include/net/irda/ircomm_tty.h
61639@@ -35,6 +35,7 @@
61640 #include <linux/termios.h>
61641 #include <linux/timer.h>
61642 #include <linux/tty.h> /* struct tty_struct */
61643+#include <asm/local.h>
61644
61645 #include <net/irda/irias_object.h>
61646 #include <net/irda/ircomm_core.h>
61647@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61648 unsigned short close_delay;
61649 unsigned short closing_wait; /* time to wait before closing */
61650
61651- int open_count;
61652- int blocked_open; /* # of blocked opens */
61653+ local_t open_count;
61654+ local_t blocked_open; /* # of blocked opens */
61655
61656 /* Protect concurent access to :
61657 * o self->open_count
61658diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
61659index f2419cf..473679f 100644
61660--- a/include/net/iucv/af_iucv.h
61661+++ b/include/net/iucv/af_iucv.h
61662@@ -139,7 +139,7 @@ struct iucv_sock {
61663 struct iucv_sock_list {
61664 struct hlist_head head;
61665 rwlock_t lock;
61666- atomic_t autobind_name;
61667+ atomic_unchecked_t autobind_name;
61668 };
61669
61670 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61671diff --git a/include/net/neighbour.h b/include/net/neighbour.h
61672index 2720884..3aa5c25 100644
61673--- a/include/net/neighbour.h
61674+++ b/include/net/neighbour.h
61675@@ -122,7 +122,7 @@ struct neigh_ops {
61676 void (*error_report)(struct neighbour *, struct sk_buff *);
61677 int (*output)(struct neighbour *, struct sk_buff *);
61678 int (*connected_output)(struct neighbour *, struct sk_buff *);
61679-};
61680+} __do_const;
61681
61682 struct pneigh_entry {
61683 struct pneigh_entry *next;
61684diff --git a/include/net/netlink.h b/include/net/netlink.h
61685index cb1f350..3279d2c 100644
61686--- a/include/net/netlink.h
61687+++ b/include/net/netlink.h
61688@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
61689 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61690 {
61691 if (mark)
61692- skb_trim(skb, (unsigned char *) mark - skb->data);
61693+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61694 }
61695
61696 /**
61697diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
61698index d786b4f..4c3dd41 100644
61699--- a/include/net/netns/ipv4.h
61700+++ b/include/net/netns/ipv4.h
61701@@ -56,8 +56,8 @@ struct netns_ipv4 {
61702
61703 unsigned int sysctl_ping_group_range[2];
61704
61705- atomic_t rt_genid;
61706- atomic_t dev_addr_genid;
61707+ atomic_unchecked_t rt_genid;
61708+ atomic_unchecked_t dev_addr_genid;
61709
61710 #ifdef CONFIG_IP_MROUTE
61711 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
61712diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
61713index 6a72a58..e6a127d 100644
61714--- a/include/net/sctp/sctp.h
61715+++ b/include/net/sctp/sctp.h
61716@@ -318,9 +318,9 @@ do { \
61717
61718 #else /* SCTP_DEBUG */
61719
61720-#define SCTP_DEBUG_PRINTK(whatever...)
61721-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
61722-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61723+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61724+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
61725+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61726 #define SCTP_ENABLE_DEBUG
61727 #define SCTP_DISABLE_DEBUG
61728 #define SCTP_ASSERT(expr, str, func)
61729diff --git a/include/net/sock.h b/include/net/sock.h
61730index 32e3937..87a1dbc 100644
61731--- a/include/net/sock.h
61732+++ b/include/net/sock.h
61733@@ -277,7 +277,7 @@ struct sock {
61734 #ifdef CONFIG_RPS
61735 __u32 sk_rxhash;
61736 #endif
61737- atomic_t sk_drops;
61738+ atomic_unchecked_t sk_drops;
61739 int sk_rcvbuf;
61740
61741 struct sk_filter __rcu *sk_filter;
61742@@ -1402,7 +1402,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
61743 }
61744
61745 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
61746- char __user *from, char *to,
61747+ char __user *from, unsigned char *to,
61748 int copy, int offset)
61749 {
61750 if (skb->ip_summed == CHECKSUM_NONE) {
61751diff --git a/include/net/tcp.h b/include/net/tcp.h
61752index bb18c4d..bb87972 100644
61753--- a/include/net/tcp.h
61754+++ b/include/net/tcp.h
61755@@ -1409,7 +1409,7 @@ struct tcp_seq_afinfo {
61756 char *name;
61757 sa_family_t family;
61758 const struct file_operations *seq_fops;
61759- struct seq_operations seq_ops;
61760+ seq_operations_no_const seq_ops;
61761 };
61762
61763 struct tcp_iter_state {
61764diff --git a/include/net/udp.h b/include/net/udp.h
61765index 3b285f4..0219639 100644
61766--- a/include/net/udp.h
61767+++ b/include/net/udp.h
61768@@ -237,7 +237,7 @@ struct udp_seq_afinfo {
61769 sa_family_t family;
61770 struct udp_table *udp_table;
61771 const struct file_operations *seq_fops;
61772- struct seq_operations seq_ops;
61773+ seq_operations_no_const seq_ops;
61774 };
61775
61776 struct udp_iter_state {
61777diff --git a/include/net/xfrm.h b/include/net/xfrm.h
61778index b203e14..1df3991 100644
61779--- a/include/net/xfrm.h
61780+++ b/include/net/xfrm.h
61781@@ -505,7 +505,7 @@ struct xfrm_policy {
61782 struct timer_list timer;
61783
61784 struct flow_cache_object flo;
61785- atomic_t genid;
61786+ atomic_unchecked_t genid;
61787 u32 priority;
61788 u32 index;
61789 struct xfrm_mark mark;
61790diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
61791index 1a046b1..ee0bef0 100644
61792--- a/include/rdma/iw_cm.h
61793+++ b/include/rdma/iw_cm.h
61794@@ -122,7 +122,7 @@ struct iw_cm_verbs {
61795 int backlog);
61796
61797 int (*destroy_listen)(struct iw_cm_id *cm_id);
61798-};
61799+} __no_const;
61800
61801 /**
61802 * iw_create_cm_id - Create an IW CM identifier.
61803diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
61804index 5d1a758..1dbf795 100644
61805--- a/include/scsi/libfc.h
61806+++ b/include/scsi/libfc.h
61807@@ -748,6 +748,7 @@ struct libfc_function_template {
61808 */
61809 void (*disc_stop_final) (struct fc_lport *);
61810 };
61811+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
61812
61813 /**
61814 * struct fc_disc - Discovery context
61815@@ -851,7 +852,7 @@ struct fc_lport {
61816 struct fc_vport *vport;
61817
61818 /* Operational Information */
61819- struct libfc_function_template tt;
61820+ libfc_function_template_no_const tt;
61821 u8 link_up;
61822 u8 qfull;
61823 enum fc_lport_state state;
61824diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
61825index 5591ed5..13eb457 100644
61826--- a/include/scsi/scsi_device.h
61827+++ b/include/scsi/scsi_device.h
61828@@ -161,9 +161,9 @@ struct scsi_device {
61829 unsigned int max_device_blocked; /* what device_blocked counts down from */
61830 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61831
61832- atomic_t iorequest_cnt;
61833- atomic_t iodone_cnt;
61834- atomic_t ioerr_cnt;
61835+ atomic_unchecked_t iorequest_cnt;
61836+ atomic_unchecked_t iodone_cnt;
61837+ atomic_unchecked_t ioerr_cnt;
61838
61839 struct device sdev_gendev,
61840 sdev_dev;
61841diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
61842index 2a65167..91e01f8 100644
61843--- a/include/scsi/scsi_transport_fc.h
61844+++ b/include/scsi/scsi_transport_fc.h
61845@@ -711,7 +711,7 @@ struct fc_function_template {
61846 unsigned long show_host_system_hostname:1;
61847
61848 unsigned long disable_target_scan:1;
61849-};
61850+} __do_const;
61851
61852
61853 /**
61854diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
61855index 030b87c..98a6954 100644
61856--- a/include/sound/ak4xxx-adda.h
61857+++ b/include/sound/ak4xxx-adda.h
61858@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
61859 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
61860 unsigned char val);
61861 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
61862-};
61863+} __no_const;
61864
61865 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
61866
61867diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
61868index 8c05e47..2b5df97 100644
61869--- a/include/sound/hwdep.h
61870+++ b/include/sound/hwdep.h
61871@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
61872 struct snd_hwdep_dsp_status *status);
61873 int (*dsp_load)(struct snd_hwdep *hw,
61874 struct snd_hwdep_dsp_image *image);
61875-};
61876+} __no_const;
61877
61878 struct snd_hwdep {
61879 struct snd_card *card;
61880diff --git a/include/sound/info.h b/include/sound/info.h
61881index 5492cc4..1a65278 100644
61882--- a/include/sound/info.h
61883+++ b/include/sound/info.h
61884@@ -44,7 +44,7 @@ struct snd_info_entry_text {
61885 struct snd_info_buffer *buffer);
61886 void (*write)(struct snd_info_entry *entry,
61887 struct snd_info_buffer *buffer);
61888-};
61889+} __no_const;
61890
61891 struct snd_info_entry_ops {
61892 int (*open)(struct snd_info_entry *entry,
61893diff --git a/include/sound/pcm.h b/include/sound/pcm.h
61894index 0cf91b2..b70cae4 100644
61895--- a/include/sound/pcm.h
61896+++ b/include/sound/pcm.h
61897@@ -81,6 +81,7 @@ struct snd_pcm_ops {
61898 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
61899 int (*ack)(struct snd_pcm_substream *substream);
61900 };
61901+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
61902
61903 /*
61904 *
61905diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
61906index af1b49e..a5d55a5 100644
61907--- a/include/sound/sb16_csp.h
61908+++ b/include/sound/sb16_csp.h
61909@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
61910 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
61911 int (*csp_stop) (struct snd_sb_csp * p);
61912 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
61913-};
61914+} __no_const;
61915
61916 /*
61917 * CSP private data
61918diff --git a/include/sound/soc.h b/include/sound/soc.h
61919index 11cfb59..e3f93f4 100644
61920--- a/include/sound/soc.h
61921+++ b/include/sound/soc.h
61922@@ -683,7 +683,7 @@ struct snd_soc_platform_driver {
61923 /* platform IO - used for platform DAPM */
61924 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
61925 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
61926-};
61927+} __do_const;
61928
61929 struct snd_soc_platform {
61930 const char *name;
61931diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
61932index 444cd6b..3327cc5 100644
61933--- a/include/sound/ymfpci.h
61934+++ b/include/sound/ymfpci.h
61935@@ -358,7 +358,7 @@ struct snd_ymfpci {
61936 spinlock_t reg_lock;
61937 spinlock_t voice_lock;
61938 wait_queue_head_t interrupt_sleep;
61939- atomic_t interrupt_sleep_count;
61940+ atomic_unchecked_t interrupt_sleep_count;
61941 struct snd_info_entry *proc_entry;
61942 const struct firmware *dsp_microcode;
61943 const struct firmware *controller_microcode;
61944diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
61945index a79886c..b483af6 100644
61946--- a/include/target/target_core_base.h
61947+++ b/include/target/target_core_base.h
61948@@ -346,7 +346,7 @@ struct t10_reservation_ops {
61949 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
61950 int (*t10_pr_register)(struct se_cmd *);
61951 int (*t10_pr_clear)(struct se_cmd *);
61952-};
61953+} __no_const;
61954
61955 struct t10_reservation {
61956 /* Reservation effects all target ports */
61957@@ -465,8 +465,8 @@ struct se_cmd {
61958 atomic_t t_se_count;
61959 atomic_t t_task_cdbs_left;
61960 atomic_t t_task_cdbs_ex_left;
61961- atomic_t t_task_cdbs_sent;
61962- atomic_t t_transport_aborted;
61963+ atomic_unchecked_t t_task_cdbs_sent;
61964+ atomic_unchecked_t t_transport_aborted;
61965 atomic_t t_transport_active;
61966 atomic_t t_transport_complete;
61967 atomic_t t_transport_queue_active;
61968@@ -704,7 +704,7 @@ struct se_device {
61969 /* Active commands on this virtual SE device */
61970 atomic_t simple_cmds;
61971 atomic_t depth_left;
61972- atomic_t dev_ordered_id;
61973+ atomic_unchecked_t dev_ordered_id;
61974 atomic_t execute_tasks;
61975 atomic_t dev_ordered_sync;
61976 atomic_t dev_qf_count;
61977diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
61978index 1c09820..7f5ec79 100644
61979--- a/include/trace/events/irq.h
61980+++ b/include/trace/events/irq.h
61981@@ -36,7 +36,7 @@ struct softirq_action;
61982 */
61983 TRACE_EVENT(irq_handler_entry,
61984
61985- TP_PROTO(int irq, struct irqaction *action),
61986+ TP_PROTO(int irq, const struct irqaction *action),
61987
61988 TP_ARGS(irq, action),
61989
61990@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
61991 */
61992 TRACE_EVENT(irq_handler_exit,
61993
61994- TP_PROTO(int irq, struct irqaction *action, int ret),
61995+ TP_PROTO(int irq, const struct irqaction *action, int ret),
61996
61997 TP_ARGS(irq, action, ret),
61998
61999diff --git a/include/video/udlfb.h b/include/video/udlfb.h
62000index c41f308..6918de3 100644
62001--- a/include/video/udlfb.h
62002+++ b/include/video/udlfb.h
62003@@ -52,10 +52,10 @@ struct dlfb_data {
62004 u32 pseudo_palette[256];
62005 int blank_mode; /*one of FB_BLANK_ */
62006 /* blit-only rendering path metrics, exposed through sysfs */
62007- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62008- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
62009- atomic_t bytes_sent; /* to usb, after compression including overhead */
62010- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
62011+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
62012+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
62013+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
62014+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
62015 };
62016
62017 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
62018diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
62019index 0993a22..32ba2fe 100644
62020--- a/include/video/uvesafb.h
62021+++ b/include/video/uvesafb.h
62022@@ -177,6 +177,7 @@ struct uvesafb_par {
62023 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62024 u8 pmi_setpal; /* PMI for palette changes */
62025 u16 *pmi_base; /* protected mode interface location */
62026+ u8 *pmi_code; /* protected mode code location */
62027 void *pmi_start;
62028 void *pmi_pal;
62029 u8 *vbe_state_orig; /*
62030diff --git a/init/Kconfig b/init/Kconfig
62031index 43298f9..2f56c12 100644
62032--- a/init/Kconfig
62033+++ b/init/Kconfig
62034@@ -1214,7 +1214,7 @@ config SLUB_DEBUG
62035
62036 config COMPAT_BRK
62037 bool "Disable heap randomization"
62038- default y
62039+ default n
62040 help
62041 Randomizing heap placement makes heap exploits harder, but it
62042 also breaks ancient binaries (including anything libc5 based).
62043diff --git a/init/do_mounts.c b/init/do_mounts.c
62044index db6e5ee..7677ff7 100644
62045--- a/init/do_mounts.c
62046+++ b/init/do_mounts.c
62047@@ -325,11 +325,11 @@ static void __init get_fs_names(char *page)
62048
62049 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62050 {
62051- int err = sys_mount(name, "/root", fs, flags, data);
62052+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
62053 if (err)
62054 return err;
62055
62056- sys_chdir((const char __user __force *)"/root");
62057+ sys_chdir((const char __force_user*)"/root");
62058 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62059 printk(KERN_INFO
62060 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62061@@ -448,18 +448,18 @@ void __init change_floppy(char *fmt, ...)
62062 va_start(args, fmt);
62063 vsprintf(buf, fmt, args);
62064 va_end(args);
62065- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62066+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62067 if (fd >= 0) {
62068 sys_ioctl(fd, FDEJECT, 0);
62069 sys_close(fd);
62070 }
62071 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62072- fd = sys_open("/dev/console", O_RDWR, 0);
62073+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
62074 if (fd >= 0) {
62075 sys_ioctl(fd, TCGETS, (long)&termios);
62076 termios.c_lflag &= ~ICANON;
62077 sys_ioctl(fd, TCSETSF, (long)&termios);
62078- sys_read(fd, &c, 1);
62079+ sys_read(fd, (char __user *)&c, 1);
62080 termios.c_lflag |= ICANON;
62081 sys_ioctl(fd, TCSETSF, (long)&termios);
62082 sys_close(fd);
62083@@ -553,6 +553,6 @@ void __init prepare_namespace(void)
62084 mount_root();
62085 out:
62086 devtmpfs_mount("dev");
62087- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62088- sys_chroot((const char __user __force *)".");
62089+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62090+ sys_chroot((const char __force_user *)".");
62091 }
62092diff --git a/init/do_mounts.h b/init/do_mounts.h
62093index f5b978a..69dbfe8 100644
62094--- a/init/do_mounts.h
62095+++ b/init/do_mounts.h
62096@@ -15,15 +15,15 @@ extern int root_mountflags;
62097
62098 static inline int create_dev(char *name, dev_t dev)
62099 {
62100- sys_unlink(name);
62101- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62102+ sys_unlink((char __force_user *)name);
62103+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
62104 }
62105
62106 #if BITS_PER_LONG == 32
62107 static inline u32 bstat(char *name)
62108 {
62109 struct stat64 stat;
62110- if (sys_stat64(name, &stat) != 0)
62111+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
62112 return 0;
62113 if (!S_ISBLK(stat.st_mode))
62114 return 0;
62115@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
62116 static inline u32 bstat(char *name)
62117 {
62118 struct stat stat;
62119- if (sys_newstat(name, &stat) != 0)
62120+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
62121 return 0;
62122 if (!S_ISBLK(stat.st_mode))
62123 return 0;
62124diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
62125index 3098a38..253064e 100644
62126--- a/init/do_mounts_initrd.c
62127+++ b/init/do_mounts_initrd.c
62128@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
62129 create_dev("/dev/root.old", Root_RAM0);
62130 /* mount initrd on rootfs' /root */
62131 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62132- sys_mkdir("/old", 0700);
62133- root_fd = sys_open("/", 0, 0);
62134- old_fd = sys_open("/old", 0, 0);
62135+ sys_mkdir((const char __force_user *)"/old", 0700);
62136+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
62137+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
62138 /* move initrd over / and chdir/chroot in initrd root */
62139- sys_chdir("/root");
62140- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62141- sys_chroot(".");
62142+ sys_chdir((const char __force_user *)"/root");
62143+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
62144+ sys_chroot((const char __force_user *)".");
62145
62146 /*
62147 * In case that a resume from disk is carried out by linuxrc or one of
62148@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
62149
62150 /* move initrd to rootfs' /old */
62151 sys_fchdir(old_fd);
62152- sys_mount("/", ".", NULL, MS_MOVE, NULL);
62153+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
62154 /* switch root and cwd back to / of rootfs */
62155 sys_fchdir(root_fd);
62156- sys_chroot(".");
62157+ sys_chroot((const char __force_user *)".");
62158 sys_close(old_fd);
62159 sys_close(root_fd);
62160
62161 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62162- sys_chdir("/old");
62163+ sys_chdir((const char __force_user *)"/old");
62164 return;
62165 }
62166
62167@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
62168 mount_root();
62169
62170 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62171- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62172+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
62173 if (!error)
62174 printk("okay\n");
62175 else {
62176- int fd = sys_open("/dev/root.old", O_RDWR, 0);
62177+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
62178 if (error == -ENOENT)
62179 printk("/initrd does not exist. Ignored.\n");
62180 else
62181 printk("failed\n");
62182 printk(KERN_NOTICE "Unmounting old root\n");
62183- sys_umount("/old", MNT_DETACH);
62184+ sys_umount((char __force_user *)"/old", MNT_DETACH);
62185 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62186 if (fd < 0) {
62187 error = fd;
62188@@ -116,11 +116,11 @@ int __init initrd_load(void)
62189 * mounted in the normal path.
62190 */
62191 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62192- sys_unlink("/initrd.image");
62193+ sys_unlink((const char __force_user *)"/initrd.image");
62194 handle_initrd();
62195 return 1;
62196 }
62197 }
62198- sys_unlink("/initrd.image");
62199+ sys_unlink((const char __force_user *)"/initrd.image");
62200 return 0;
62201 }
62202diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
62203index 32c4799..c27ee74 100644
62204--- a/init/do_mounts_md.c
62205+++ b/init/do_mounts_md.c
62206@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62207 partitioned ? "_d" : "", minor,
62208 md_setup_args[ent].device_names);
62209
62210- fd = sys_open(name, 0, 0);
62211+ fd = sys_open((char __force_user *)name, 0, 0);
62212 if (fd < 0) {
62213 printk(KERN_ERR "md: open failed - cannot start "
62214 "array %s\n", name);
62215@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62216 * array without it
62217 */
62218 sys_close(fd);
62219- fd = sys_open(name, 0, 0);
62220+ fd = sys_open((char __force_user *)name, 0, 0);
62221 sys_ioctl(fd, BLKRRPART, 0);
62222 }
62223 sys_close(fd);
62224@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62225
62226 wait_for_device_probe();
62227
62228- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
62229+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
62230 if (fd >= 0) {
62231 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62232 sys_close(fd);
62233diff --git a/init/initramfs.c b/init/initramfs.c
62234index 2531811..040d4d4 100644
62235--- a/init/initramfs.c
62236+++ b/init/initramfs.c
62237@@ -74,7 +74,7 @@ static void __init free_hash(void)
62238 }
62239 }
62240
62241-static long __init do_utime(char __user *filename, time_t mtime)
62242+static long __init do_utime(__force char __user *filename, time_t mtime)
62243 {
62244 struct timespec t[2];
62245
62246@@ -109,7 +109,7 @@ static void __init dir_utime(void)
62247 struct dir_entry *de, *tmp;
62248 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62249 list_del(&de->list);
62250- do_utime(de->name, de->mtime);
62251+ do_utime((char __force_user *)de->name, de->mtime);
62252 kfree(de->name);
62253 kfree(de);
62254 }
62255@@ -271,7 +271,7 @@ static int __init maybe_link(void)
62256 if (nlink >= 2) {
62257 char *old = find_link(major, minor, ino, mode, collected);
62258 if (old)
62259- return (sys_link(old, collected) < 0) ? -1 : 1;
62260+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
62261 }
62262 return 0;
62263 }
62264@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
62265 {
62266 struct stat st;
62267
62268- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62269+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
62270 if (S_ISDIR(st.st_mode))
62271- sys_rmdir(path);
62272+ sys_rmdir((char __force_user *)path);
62273 else
62274- sys_unlink(path);
62275+ sys_unlink((char __force_user *)path);
62276 }
62277 }
62278
62279@@ -305,7 +305,7 @@ static int __init do_name(void)
62280 int openflags = O_WRONLY|O_CREAT;
62281 if (ml != 1)
62282 openflags |= O_TRUNC;
62283- wfd = sys_open(collected, openflags, mode);
62284+ wfd = sys_open((char __force_user *)collected, openflags, mode);
62285
62286 if (wfd >= 0) {
62287 sys_fchown(wfd, uid, gid);
62288@@ -317,17 +317,17 @@ static int __init do_name(void)
62289 }
62290 }
62291 } else if (S_ISDIR(mode)) {
62292- sys_mkdir(collected, mode);
62293- sys_chown(collected, uid, gid);
62294- sys_chmod(collected, mode);
62295+ sys_mkdir((char __force_user *)collected, mode);
62296+ sys_chown((char __force_user *)collected, uid, gid);
62297+ sys_chmod((char __force_user *)collected, mode);
62298 dir_add(collected, mtime);
62299 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62300 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62301 if (maybe_link() == 0) {
62302- sys_mknod(collected, mode, rdev);
62303- sys_chown(collected, uid, gid);
62304- sys_chmod(collected, mode);
62305- do_utime(collected, mtime);
62306+ sys_mknod((char __force_user *)collected, mode, rdev);
62307+ sys_chown((char __force_user *)collected, uid, gid);
62308+ sys_chmod((char __force_user *)collected, mode);
62309+ do_utime((char __force_user *)collected, mtime);
62310 }
62311 }
62312 return 0;
62313@@ -336,15 +336,15 @@ static int __init do_name(void)
62314 static int __init do_copy(void)
62315 {
62316 if (count >= body_len) {
62317- sys_write(wfd, victim, body_len);
62318+ sys_write(wfd, (char __force_user *)victim, body_len);
62319 sys_close(wfd);
62320- do_utime(vcollected, mtime);
62321+ do_utime((char __force_user *)vcollected, mtime);
62322 kfree(vcollected);
62323 eat(body_len);
62324 state = SkipIt;
62325 return 0;
62326 } else {
62327- sys_write(wfd, victim, count);
62328+ sys_write(wfd, (char __force_user *)victim, count);
62329 body_len -= count;
62330 eat(count);
62331 return 1;
62332@@ -355,9 +355,9 @@ static int __init do_symlink(void)
62333 {
62334 collected[N_ALIGN(name_len) + body_len] = '\0';
62335 clean_path(collected, 0);
62336- sys_symlink(collected + N_ALIGN(name_len), collected);
62337- sys_lchown(collected, uid, gid);
62338- do_utime(collected, mtime);
62339+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
62340+ sys_lchown((char __force_user *)collected, uid, gid);
62341+ do_utime((char __force_user *)collected, mtime);
62342 state = SkipIt;
62343 next_state = Reset;
62344 return 0;
62345diff --git a/init/main.c b/init/main.c
62346index 217ed23..32e5731 100644
62347--- a/init/main.c
62348+++ b/init/main.c
62349@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
62350 extern void tc_init(void);
62351 #endif
62352
62353+extern void grsecurity_init(void);
62354+
62355 /*
62356 * Debug helper: via this flag we know that we are in 'early bootup code'
62357 * where only the boot processor is running with IRQ disabled. This means
62358@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
62359
62360 __setup("reset_devices", set_reset_devices);
62361
62362+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62363+extern char pax_enter_kernel_user[];
62364+extern char pax_exit_kernel_user[];
62365+extern pgdval_t clone_pgd_mask;
62366+#endif
62367+
62368+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62369+static int __init setup_pax_nouderef(char *str)
62370+{
62371+#ifdef CONFIG_X86_32
62372+ unsigned int cpu;
62373+ struct desc_struct *gdt;
62374+
62375+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
62376+ gdt = get_cpu_gdt_table(cpu);
62377+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62378+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62379+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62380+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62381+ }
62382+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62383+#else
62384+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62385+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62386+ clone_pgd_mask = ~(pgdval_t)0UL;
62387+#endif
62388+
62389+ return 0;
62390+}
62391+early_param("pax_nouderef", setup_pax_nouderef);
62392+#endif
62393+
62394+#ifdef CONFIG_PAX_SOFTMODE
62395+int pax_softmode;
62396+
62397+static int __init setup_pax_softmode(char *str)
62398+{
62399+ get_option(&str, &pax_softmode);
62400+ return 1;
62401+}
62402+__setup("pax_softmode=", setup_pax_softmode);
62403+#endif
62404+
62405 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62406 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62407 static const char *panic_later, *panic_param;
62408@@ -681,6 +726,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
62409 {
62410 int count = preempt_count();
62411 int ret;
62412+ const char *msg1 = "", *msg2 = "";
62413
62414 if (initcall_debug)
62415 ret = do_one_initcall_debug(fn);
62416@@ -693,15 +739,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
62417 sprintf(msgbuf, "error code %d ", ret);
62418
62419 if (preempt_count() != count) {
62420- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62421+ msg1 = " preemption imbalance";
62422 preempt_count() = count;
62423 }
62424 if (irqs_disabled()) {
62425- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62426+ msg2 = " disabled interrupts";
62427 local_irq_enable();
62428 }
62429- if (msgbuf[0]) {
62430- printk("initcall %pF returned with %s\n", fn, msgbuf);
62431+ if (msgbuf[0] || *msg1 || *msg2) {
62432+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62433 }
62434
62435 return ret;
62436@@ -820,7 +866,7 @@ static int __init kernel_init(void * unused)
62437 do_basic_setup();
62438
62439 /* Open the /dev/console on the rootfs, this should never fail */
62440- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
62441+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
62442 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
62443
62444 (void) sys_dup(0);
62445@@ -833,11 +879,13 @@ static int __init kernel_init(void * unused)
62446 if (!ramdisk_execute_command)
62447 ramdisk_execute_command = "/init";
62448
62449- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62450+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
62451 ramdisk_execute_command = NULL;
62452 prepare_namespace();
62453 }
62454
62455+ grsecurity_init();
62456+
62457 /*
62458 * Ok, we have completed the initial bootup, and
62459 * we're essentially up and running. Get rid of the
62460diff --git a/ipc/mqueue.c b/ipc/mqueue.c
62461index 5b4293d..f179875 100644
62462--- a/ipc/mqueue.c
62463+++ b/ipc/mqueue.c
62464@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
62465 mq_bytes = (mq_msg_tblsz +
62466 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62467
62468+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62469 spin_lock(&mq_lock);
62470 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62471 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
62472diff --git a/ipc/msg.c b/ipc/msg.c
62473index 7385de2..a8180e0 100644
62474--- a/ipc/msg.c
62475+++ b/ipc/msg.c
62476@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
62477 return security_msg_queue_associate(msq, msgflg);
62478 }
62479
62480+static struct ipc_ops msg_ops = {
62481+ .getnew = newque,
62482+ .associate = msg_security,
62483+ .more_checks = NULL
62484+};
62485+
62486 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62487 {
62488 struct ipc_namespace *ns;
62489- struct ipc_ops msg_ops;
62490 struct ipc_params msg_params;
62491
62492 ns = current->nsproxy->ipc_ns;
62493
62494- msg_ops.getnew = newque;
62495- msg_ops.associate = msg_security;
62496- msg_ops.more_checks = NULL;
62497-
62498 msg_params.key = key;
62499 msg_params.flg = msgflg;
62500
62501diff --git a/ipc/sem.c b/ipc/sem.c
62502index 5215a81..cfc0cac 100644
62503--- a/ipc/sem.c
62504+++ b/ipc/sem.c
62505@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
62506 return 0;
62507 }
62508
62509+static struct ipc_ops sem_ops = {
62510+ .getnew = newary,
62511+ .associate = sem_security,
62512+ .more_checks = sem_more_checks
62513+};
62514+
62515 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62516 {
62517 struct ipc_namespace *ns;
62518- struct ipc_ops sem_ops;
62519 struct ipc_params sem_params;
62520
62521 ns = current->nsproxy->ipc_ns;
62522@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62523 if (nsems < 0 || nsems > ns->sc_semmsl)
62524 return -EINVAL;
62525
62526- sem_ops.getnew = newary;
62527- sem_ops.associate = sem_security;
62528- sem_ops.more_checks = sem_more_checks;
62529-
62530 sem_params.key = key;
62531 sem_params.flg = semflg;
62532 sem_params.u.nsems = nsems;
62533diff --git a/ipc/shm.c b/ipc/shm.c
62534index b76be5b..859e750 100644
62535--- a/ipc/shm.c
62536+++ b/ipc/shm.c
62537@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
62538 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62539 #endif
62540
62541+#ifdef CONFIG_GRKERNSEC
62542+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62543+ const time_t shm_createtime, const uid_t cuid,
62544+ const int shmid);
62545+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62546+ const time_t shm_createtime);
62547+#endif
62548+
62549 void shm_init_ns(struct ipc_namespace *ns)
62550 {
62551 ns->shm_ctlmax = SHMMAX;
62552@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
62553 shp->shm_lprid = 0;
62554 shp->shm_atim = shp->shm_dtim = 0;
62555 shp->shm_ctim = get_seconds();
62556+#ifdef CONFIG_GRKERNSEC
62557+ {
62558+ struct timespec timeval;
62559+ do_posix_clock_monotonic_gettime(&timeval);
62560+
62561+ shp->shm_createtime = timeval.tv_sec;
62562+ }
62563+#endif
62564 shp->shm_segsz = size;
62565 shp->shm_nattch = 0;
62566 shp->shm_file = file;
62567@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
62568 return 0;
62569 }
62570
62571+static struct ipc_ops shm_ops = {
62572+ .getnew = newseg,
62573+ .associate = shm_security,
62574+ .more_checks = shm_more_checks
62575+};
62576+
62577 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62578 {
62579 struct ipc_namespace *ns;
62580- struct ipc_ops shm_ops;
62581 struct ipc_params shm_params;
62582
62583 ns = current->nsproxy->ipc_ns;
62584
62585- shm_ops.getnew = newseg;
62586- shm_ops.associate = shm_security;
62587- shm_ops.more_checks = shm_more_checks;
62588-
62589 shm_params.key = key;
62590 shm_params.flg = shmflg;
62591 shm_params.u.size = size;
62592@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62593 f_mode = FMODE_READ | FMODE_WRITE;
62594 }
62595 if (shmflg & SHM_EXEC) {
62596+
62597+#ifdef CONFIG_PAX_MPROTECT
62598+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
62599+ goto out;
62600+#endif
62601+
62602 prot |= PROT_EXEC;
62603 acc_mode |= S_IXUGO;
62604 }
62605@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
62606 if (err)
62607 goto out_unlock;
62608
62609+#ifdef CONFIG_GRKERNSEC
62610+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62611+ shp->shm_perm.cuid, shmid) ||
62612+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62613+ err = -EACCES;
62614+ goto out_unlock;
62615+ }
62616+#endif
62617+
62618 path = shp->shm_file->f_path;
62619 path_get(&path);
62620 shp->shm_nattch++;
62621+#ifdef CONFIG_GRKERNSEC
62622+ shp->shm_lapid = current->pid;
62623+#endif
62624 size = i_size_read(path.dentry->d_inode);
62625 shm_unlock(shp);
62626
62627diff --git a/kernel/acct.c b/kernel/acct.c
62628index fa7eb3d..7faf116 100644
62629--- a/kernel/acct.c
62630+++ b/kernel/acct.c
62631@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
62632 */
62633 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62634 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62635- file->f_op->write(file, (char *)&ac,
62636+ file->f_op->write(file, (char __force_user *)&ac,
62637 sizeof(acct_t), &file->f_pos);
62638 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62639 set_fs(fs);
62640diff --git a/kernel/audit.c b/kernel/audit.c
62641index 09fae26..ed71d5b 100644
62642--- a/kernel/audit.c
62643+++ b/kernel/audit.c
62644@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
62645 3) suppressed due to audit_rate_limit
62646 4) suppressed due to audit_backlog_limit
62647 */
62648-static atomic_t audit_lost = ATOMIC_INIT(0);
62649+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62650
62651 /* The netlink socket. */
62652 static struct sock *audit_sock;
62653@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
62654 unsigned long now;
62655 int print;
62656
62657- atomic_inc(&audit_lost);
62658+ atomic_inc_unchecked(&audit_lost);
62659
62660 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62661
62662@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
62663 printk(KERN_WARNING
62664 "audit: audit_lost=%d audit_rate_limit=%d "
62665 "audit_backlog_limit=%d\n",
62666- atomic_read(&audit_lost),
62667+ atomic_read_unchecked(&audit_lost),
62668 audit_rate_limit,
62669 audit_backlog_limit);
62670 audit_panic(message);
62671@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
62672 status_set.pid = audit_pid;
62673 status_set.rate_limit = audit_rate_limit;
62674 status_set.backlog_limit = audit_backlog_limit;
62675- status_set.lost = atomic_read(&audit_lost);
62676+ status_set.lost = atomic_read_unchecked(&audit_lost);
62677 status_set.backlog = skb_queue_len(&audit_skb_queue);
62678 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62679 &status_set, sizeof(status_set));
62680@@ -1260,12 +1260,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
62681 avail = audit_expand(ab,
62682 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
62683 if (!avail)
62684- goto out;
62685+ goto out_va_end;
62686 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
62687 }
62688- va_end(args2);
62689 if (len > 0)
62690 skb_put(skb, len);
62691+out_va_end:
62692+ va_end(args2);
62693 out:
62694 return;
62695 }
62696diff --git a/kernel/auditsc.c b/kernel/auditsc.c
62697index 47b7fc1..c003c33 100644
62698--- a/kernel/auditsc.c
62699+++ b/kernel/auditsc.c
62700@@ -1166,8 +1166,8 @@ static void audit_log_execve_info(struct audit_context *context,
62701 struct audit_buffer **ab,
62702 struct audit_aux_data_execve *axi)
62703 {
62704- int i;
62705- size_t len, len_sent = 0;
62706+ int i, len;
62707+ size_t len_sent = 0;
62708 const char __user *p;
62709 char *buf;
62710
62711@@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
62712 }
62713
62714 /* global counter which is incremented every time something logs in */
62715-static atomic_t session_id = ATOMIC_INIT(0);
62716+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62717
62718 /**
62719 * audit_set_loginuid - set a task's audit_context loginuid
62720@@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
62721 */
62722 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62723 {
62724- unsigned int sessionid = atomic_inc_return(&session_id);
62725+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62726 struct audit_context *context = task->audit_context;
62727
62728 if (context && context->in_syscall) {
62729diff --git a/kernel/capability.c b/kernel/capability.c
62730index b463871..fa3ea1f 100644
62731--- a/kernel/capability.c
62732+++ b/kernel/capability.c
62733@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
62734 * before modification is attempted and the application
62735 * fails.
62736 */
62737+ if (tocopy > ARRAY_SIZE(kdata))
62738+ return -EFAULT;
62739+
62740 if (copy_to_user(dataptr, kdata, tocopy
62741 * sizeof(struct __user_cap_data_struct))) {
62742 return -EFAULT;
62743@@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
62744 BUG();
62745 }
62746
62747- if (security_capable(ns, current_cred(), cap) == 0) {
62748+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
62749 current->flags |= PF_SUPERPRIV;
62750 return true;
62751 }
62752@@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
62753 }
62754 EXPORT_SYMBOL(ns_capable);
62755
62756+bool ns_capable_nolog(struct user_namespace *ns, int cap)
62757+{
62758+ if (unlikely(!cap_valid(cap))) {
62759+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62760+ BUG();
62761+ }
62762+
62763+ if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
62764+ current->flags |= PF_SUPERPRIV;
62765+ return true;
62766+ }
62767+ return false;
62768+}
62769+EXPORT_SYMBOL(ns_capable_nolog);
62770+
62771+bool capable_nolog(int cap)
62772+{
62773+ return ns_capable_nolog(&init_user_ns, cap);
62774+}
62775+EXPORT_SYMBOL(capable_nolog);
62776+
62777 /**
62778 * task_ns_capable - Determine whether current task has a superior
62779 * capability targeted at a specific task's user namespace.
62780@@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
62781 }
62782 EXPORT_SYMBOL(task_ns_capable);
62783
62784+bool task_ns_capable_nolog(struct task_struct *t, int cap)
62785+{
62786+ return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
62787+}
62788+EXPORT_SYMBOL(task_ns_capable_nolog);
62789+
62790 /**
62791 * nsown_capable - Check superior capability to one's own user_ns
62792 * @cap: The capability in question
62793diff --git a/kernel/compat.c b/kernel/compat.c
62794index f346ced..aa2b1f4 100644
62795--- a/kernel/compat.c
62796+++ b/kernel/compat.c
62797@@ -13,6 +13,7 @@
62798
62799 #include <linux/linkage.h>
62800 #include <linux/compat.h>
62801+#include <linux/module.h>
62802 #include <linux/errno.h>
62803 #include <linux/time.h>
62804 #include <linux/signal.h>
62805@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
62806 mm_segment_t oldfs;
62807 long ret;
62808
62809- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
62810+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
62811 oldfs = get_fs();
62812 set_fs(KERNEL_DS);
62813 ret = hrtimer_nanosleep_restart(restart);
62814@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
62815 oldfs = get_fs();
62816 set_fs(KERNEL_DS);
62817 ret = hrtimer_nanosleep(&tu,
62818- rmtp ? (struct timespec __user *)&rmt : NULL,
62819+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
62820 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
62821 set_fs(oldfs);
62822
62823@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
62824 mm_segment_t old_fs = get_fs();
62825
62826 set_fs(KERNEL_DS);
62827- ret = sys_sigpending((old_sigset_t __user *) &s);
62828+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
62829 set_fs(old_fs);
62830 if (ret == 0)
62831 ret = put_user(s, set);
62832@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
62833 old_fs = get_fs();
62834 set_fs(KERNEL_DS);
62835 ret = sys_sigprocmask(how,
62836- set ? (old_sigset_t __user *) &s : NULL,
62837- oset ? (old_sigset_t __user *) &s : NULL);
62838+ set ? (old_sigset_t __force_user *) &s : NULL,
62839+ oset ? (old_sigset_t __force_user *) &s : NULL);
62840 set_fs(old_fs);
62841 if (ret == 0)
62842 if (oset)
62843@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
62844 mm_segment_t old_fs = get_fs();
62845
62846 set_fs(KERNEL_DS);
62847- ret = sys_old_getrlimit(resource, &r);
62848+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
62849 set_fs(old_fs);
62850
62851 if (!ret) {
62852@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
62853 mm_segment_t old_fs = get_fs();
62854
62855 set_fs(KERNEL_DS);
62856- ret = sys_getrusage(who, (struct rusage __user *) &r);
62857+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
62858 set_fs(old_fs);
62859
62860 if (ret)
62861@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
62862 set_fs (KERNEL_DS);
62863 ret = sys_wait4(pid,
62864 (stat_addr ?
62865- (unsigned int __user *) &status : NULL),
62866- options, (struct rusage __user *) &r);
62867+ (unsigned int __force_user *) &status : NULL),
62868+ options, (struct rusage __force_user *) &r);
62869 set_fs (old_fs);
62870
62871 if (ret > 0) {
62872@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
62873 memset(&info, 0, sizeof(info));
62874
62875 set_fs(KERNEL_DS);
62876- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
62877- uru ? (struct rusage __user *)&ru : NULL);
62878+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
62879+ uru ? (struct rusage __force_user *)&ru : NULL);
62880 set_fs(old_fs);
62881
62882 if ((ret < 0) || (info.si_signo == 0))
62883@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
62884 oldfs = get_fs();
62885 set_fs(KERNEL_DS);
62886 err = sys_timer_settime(timer_id, flags,
62887- (struct itimerspec __user *) &newts,
62888- (struct itimerspec __user *) &oldts);
62889+ (struct itimerspec __force_user *) &newts,
62890+ (struct itimerspec __force_user *) &oldts);
62891 set_fs(oldfs);
62892 if (!err && old && put_compat_itimerspec(old, &oldts))
62893 return -EFAULT;
62894@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
62895 oldfs = get_fs();
62896 set_fs(KERNEL_DS);
62897 err = sys_timer_gettime(timer_id,
62898- (struct itimerspec __user *) &ts);
62899+ (struct itimerspec __force_user *) &ts);
62900 set_fs(oldfs);
62901 if (!err && put_compat_itimerspec(setting, &ts))
62902 return -EFAULT;
62903@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
62904 oldfs = get_fs();
62905 set_fs(KERNEL_DS);
62906 err = sys_clock_settime(which_clock,
62907- (struct timespec __user *) &ts);
62908+ (struct timespec __force_user *) &ts);
62909 set_fs(oldfs);
62910 return err;
62911 }
62912@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
62913 oldfs = get_fs();
62914 set_fs(KERNEL_DS);
62915 err = sys_clock_gettime(which_clock,
62916- (struct timespec __user *) &ts);
62917+ (struct timespec __force_user *) &ts);
62918 set_fs(oldfs);
62919 if (!err && put_compat_timespec(&ts, tp))
62920 return -EFAULT;
62921@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
62922
62923 oldfs = get_fs();
62924 set_fs(KERNEL_DS);
62925- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
62926+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
62927 set_fs(oldfs);
62928
62929 err = compat_put_timex(utp, &txc);
62930@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
62931 oldfs = get_fs();
62932 set_fs(KERNEL_DS);
62933 err = sys_clock_getres(which_clock,
62934- (struct timespec __user *) &ts);
62935+ (struct timespec __force_user *) &ts);
62936 set_fs(oldfs);
62937 if (!err && tp && put_compat_timespec(&ts, tp))
62938 return -EFAULT;
62939@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
62940 long err;
62941 mm_segment_t oldfs;
62942 struct timespec tu;
62943- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
62944+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
62945
62946- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
62947+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
62948 oldfs = get_fs();
62949 set_fs(KERNEL_DS);
62950 err = clock_nanosleep_restart(restart);
62951@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
62952 oldfs = get_fs();
62953 set_fs(KERNEL_DS);
62954 err = sys_clock_nanosleep(which_clock, flags,
62955- (struct timespec __user *) &in,
62956- (struct timespec __user *) &out);
62957+ (struct timespec __force_user *) &in,
62958+ (struct timespec __force_user *) &out);
62959 set_fs(oldfs);
62960
62961 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
62962diff --git a/kernel/configs.c b/kernel/configs.c
62963index 42e8fa0..9e7406b 100644
62964--- a/kernel/configs.c
62965+++ b/kernel/configs.c
62966@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
62967 struct proc_dir_entry *entry;
62968
62969 /* create the current config file */
62970+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62971+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62972+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62973+ &ikconfig_file_ops);
62974+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62975+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62976+ &ikconfig_file_ops);
62977+#endif
62978+#else
62979 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62980 &ikconfig_file_ops);
62981+#endif
62982+
62983 if (!entry)
62984 return -ENOMEM;
62985
62986diff --git a/kernel/cred.c b/kernel/cred.c
62987index 5791612..a3c04dc 100644
62988--- a/kernel/cred.c
62989+++ b/kernel/cred.c
62990@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
62991 validate_creds(cred);
62992 put_cred(cred);
62993 }
62994+
62995+#ifdef CONFIG_GRKERNSEC_SETXID
62996+ cred = (struct cred *) tsk->delayed_cred;
62997+ if (cred) {
62998+ tsk->delayed_cred = NULL;
62999+ validate_creds(cred);
63000+ put_cred(cred);
63001+ }
63002+#endif
63003 }
63004
63005 /**
63006@@ -470,7 +479,7 @@ error_put:
63007 * Always returns 0 thus allowing this function to be tail-called at the end
63008 * of, say, sys_setgid().
63009 */
63010-int commit_creds(struct cred *new)
63011+static int __commit_creds(struct cred *new)
63012 {
63013 struct task_struct *task = current;
63014 const struct cred *old = task->real_cred;
63015@@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
63016
63017 get_cred(new); /* we will require a ref for the subj creds too */
63018
63019+ gr_set_role_label(task, new->uid, new->gid);
63020+
63021 /* dumpability changes */
63022 if (old->euid != new->euid ||
63023 old->egid != new->egid ||
63024@@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
63025 put_cred(old);
63026 return 0;
63027 }
63028+#ifdef CONFIG_GRKERNSEC_SETXID
63029+extern int set_user(struct cred *new);
63030+
63031+void gr_delayed_cred_worker(void)
63032+{
63033+ const struct cred *new = current->delayed_cred;
63034+ struct cred *ncred;
63035+
63036+ current->delayed_cred = NULL;
63037+
63038+ if (current_uid() && new != NULL) {
63039+ // from doing get_cred on it when queueing this
63040+ put_cred(new);
63041+ return;
63042+ } else if (new == NULL)
63043+ return;
63044+
63045+ ncred = prepare_creds();
63046+ if (!ncred)
63047+ goto die;
63048+ // uids
63049+ ncred->uid = new->uid;
63050+ ncred->euid = new->euid;
63051+ ncred->suid = new->suid;
63052+ ncred->fsuid = new->fsuid;
63053+ // gids
63054+ ncred->gid = new->gid;
63055+ ncred->egid = new->egid;
63056+ ncred->sgid = new->sgid;
63057+ ncred->fsgid = new->fsgid;
63058+ // groups
63059+ if (set_groups(ncred, new->group_info) < 0) {
63060+ abort_creds(ncred);
63061+ goto die;
63062+ }
63063+ // caps
63064+ ncred->securebits = new->securebits;
63065+ ncred->cap_inheritable = new->cap_inheritable;
63066+ ncred->cap_permitted = new->cap_permitted;
63067+ ncred->cap_effective = new->cap_effective;
63068+ ncred->cap_bset = new->cap_bset;
63069+
63070+ if (set_user(ncred)) {
63071+ abort_creds(ncred);
63072+ goto die;
63073+ }
63074+
63075+ // from doing get_cred on it when queueing this
63076+ put_cred(new);
63077+
63078+ __commit_creds(ncred);
63079+ return;
63080+die:
63081+ // from doing get_cred on it when queueing this
63082+ put_cred(new);
63083+ do_group_exit(SIGKILL);
63084+}
63085+#endif
63086+
63087+int commit_creds(struct cred *new)
63088+{
63089+#ifdef CONFIG_GRKERNSEC_SETXID
63090+ struct task_struct *t;
63091+
63092+ /* we won't get called with tasklist_lock held for writing
63093+ and interrupts disabled as the cred struct in that case is
63094+ init_cred
63095+ */
63096+ if (grsec_enable_setxid && !current_is_single_threaded() &&
63097+ !current_uid() && new->uid) {
63098+ rcu_read_lock();
63099+ read_lock(&tasklist_lock);
63100+ for (t = next_thread(current); t != current;
63101+ t = next_thread(t)) {
63102+ if (t->delayed_cred == NULL) {
63103+ t->delayed_cred = get_cred(new);
63104+ set_tsk_need_resched(t);
63105+ }
63106+ }
63107+ read_unlock(&tasklist_lock);
63108+ rcu_read_unlock();
63109+ }
63110+#endif
63111+ return __commit_creds(new);
63112+}
63113+
63114 EXPORT_SYMBOL(commit_creds);
63115
63116 /**
63117diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
63118index 0d7c087..01b8cef 100644
63119--- a/kernel/debug/debug_core.c
63120+++ b/kernel/debug/debug_core.c
63121@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
63122 */
63123 static atomic_t masters_in_kgdb;
63124 static atomic_t slaves_in_kgdb;
63125-static atomic_t kgdb_break_tasklet_var;
63126+static atomic_unchecked_t kgdb_break_tasklet_var;
63127 atomic_t kgdb_setting_breakpoint;
63128
63129 struct task_struct *kgdb_usethread;
63130@@ -129,7 +129,7 @@ int kgdb_single_step;
63131 static pid_t kgdb_sstep_pid;
63132
63133 /* to keep track of the CPU which is doing the single stepping*/
63134-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63135+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63136
63137 /*
63138 * If you are debugging a problem where roundup (the collection of
63139@@ -542,7 +542,7 @@ return_normal:
63140 * kernel will only try for the value of sstep_tries before
63141 * giving up and continuing on.
63142 */
63143- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63144+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63145 (kgdb_info[cpu].task &&
63146 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
63147 atomic_set(&kgdb_active, -1);
63148@@ -636,8 +636,8 @@ cpu_master_loop:
63149 }
63150
63151 kgdb_restore:
63152- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
63153- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
63154+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
63155+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
63156 if (kgdb_info[sstep_cpu].task)
63157 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
63158 else
63159@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
63160 static void kgdb_tasklet_bpt(unsigned long ing)
63161 {
63162 kgdb_breakpoint();
63163- atomic_set(&kgdb_break_tasklet_var, 0);
63164+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
63165 }
63166
63167 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
63168
63169 void kgdb_schedule_breakpoint(void)
63170 {
63171- if (atomic_read(&kgdb_break_tasklet_var) ||
63172+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
63173 atomic_read(&kgdb_active) != -1 ||
63174 atomic_read(&kgdb_setting_breakpoint))
63175 return;
63176- atomic_inc(&kgdb_break_tasklet_var);
63177+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
63178 tasklet_schedule(&kgdb_tasklet_breakpoint);
63179 }
63180 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
63181diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
63182index 63786e7..0780cac 100644
63183--- a/kernel/debug/kdb/kdb_main.c
63184+++ b/kernel/debug/kdb/kdb_main.c
63185@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
63186 list_for_each_entry(mod, kdb_modules, list) {
63187
63188 kdb_printf("%-20s%8u 0x%p ", mod->name,
63189- mod->core_size, (void *)mod);
63190+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
63191 #ifdef CONFIG_MODULE_UNLOAD
63192 kdb_printf("%4d ", module_refcount(mod));
63193 #endif
63194@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
63195 kdb_printf(" (Loading)");
63196 else
63197 kdb_printf(" (Live)");
63198- kdb_printf(" 0x%p", mod->module_core);
63199+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
63200
63201 #ifdef CONFIG_MODULE_UNLOAD
63202 {
63203diff --git a/kernel/events/core.c b/kernel/events/core.c
63204index 58690af..d903d75 100644
63205--- a/kernel/events/core.c
63206+++ b/kernel/events/core.c
63207@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
63208 return 0;
63209 }
63210
63211-static atomic64_t perf_event_id;
63212+static atomic64_unchecked_t perf_event_id;
63213
63214 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
63215 enum event_type_t event_type);
63216@@ -2540,7 +2540,7 @@ static void __perf_event_read(void *info)
63217
63218 static inline u64 perf_event_count(struct perf_event *event)
63219 {
63220- return local64_read(&event->count) + atomic64_read(&event->child_count);
63221+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
63222 }
63223
63224 static u64 perf_event_read(struct perf_event *event)
63225@@ -3065,9 +3065,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
63226 mutex_lock(&event->child_mutex);
63227 total += perf_event_read(event);
63228 *enabled += event->total_time_enabled +
63229- atomic64_read(&event->child_total_time_enabled);
63230+ atomic64_read_unchecked(&event->child_total_time_enabled);
63231 *running += event->total_time_running +
63232- atomic64_read(&event->child_total_time_running);
63233+ atomic64_read_unchecked(&event->child_total_time_running);
63234
63235 list_for_each_entry(child, &event->child_list, child_list) {
63236 total += perf_event_read(child);
63237@@ -3474,10 +3474,10 @@ void perf_event_update_userpage(struct perf_event *event)
63238 userpg->offset -= local64_read(&event->hw.prev_count);
63239
63240 userpg->time_enabled = enabled +
63241- atomic64_read(&event->child_total_time_enabled);
63242+ atomic64_read_unchecked(&event->child_total_time_enabled);
63243
63244 userpg->time_running = running +
63245- atomic64_read(&event->child_total_time_running);
63246+ atomic64_read_unchecked(&event->child_total_time_running);
63247
63248 barrier();
63249 ++userpg->lock;
63250@@ -3906,11 +3906,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
63251 values[n++] = perf_event_count(event);
63252 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
63253 values[n++] = enabled +
63254- atomic64_read(&event->child_total_time_enabled);
63255+ atomic64_read_unchecked(&event->child_total_time_enabled);
63256 }
63257 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
63258 values[n++] = running +
63259- atomic64_read(&event->child_total_time_running);
63260+ atomic64_read_unchecked(&event->child_total_time_running);
63261 }
63262 if (read_format & PERF_FORMAT_ID)
63263 values[n++] = primary_event_id(event);
63264@@ -4561,12 +4561,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
63265 * need to add enough zero bytes after the string to handle
63266 * the 64bit alignment we do later.
63267 */
63268- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
63269+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
63270 if (!buf) {
63271 name = strncpy(tmp, "//enomem", sizeof(tmp));
63272 goto got_name;
63273 }
63274- name = d_path(&file->f_path, buf, PATH_MAX);
63275+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
63276 if (IS_ERR(name)) {
63277 name = strncpy(tmp, "//toolong", sizeof(tmp));
63278 goto got_name;
63279@@ -5921,7 +5921,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
63280 event->parent = parent_event;
63281
63282 event->ns = get_pid_ns(current->nsproxy->pid_ns);
63283- event->id = atomic64_inc_return(&perf_event_id);
63284+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
63285
63286 event->state = PERF_EVENT_STATE_INACTIVE;
63287
63288@@ -6443,10 +6443,10 @@ static void sync_child_event(struct perf_event *child_event,
63289 /*
63290 * Add back the child's count to the parent's count:
63291 */
63292- atomic64_add(child_val, &parent_event->child_count);
63293- atomic64_add(child_event->total_time_enabled,
63294+ atomic64_add_unchecked(child_val, &parent_event->child_count);
63295+ atomic64_add_unchecked(child_event->total_time_enabled,
63296 &parent_event->child_total_time_enabled);
63297- atomic64_add(child_event->total_time_running,
63298+ atomic64_add_unchecked(child_event->total_time_running,
63299 &parent_event->child_total_time_running);
63300
63301 /*
63302diff --git a/kernel/exit.c b/kernel/exit.c
63303index e6e01b9..619f837 100644
63304--- a/kernel/exit.c
63305+++ b/kernel/exit.c
63306@@ -57,6 +57,10 @@
63307 #include <asm/pgtable.h>
63308 #include <asm/mmu_context.h>
63309
63310+#ifdef CONFIG_GRKERNSEC
63311+extern rwlock_t grsec_exec_file_lock;
63312+#endif
63313+
63314 static void exit_mm(struct task_struct * tsk);
63315
63316 static void __unhash_process(struct task_struct *p, bool group_dead)
63317@@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
63318 struct task_struct *leader;
63319 int zap_leader;
63320 repeat:
63321+#ifdef CONFIG_NET
63322+ gr_del_task_from_ip_table(p);
63323+#endif
63324+
63325 /* don't need to get the RCU readlock here - the process is dead and
63326 * can't be modifying its own credentials. But shut RCU-lockdep up */
63327 rcu_read_lock();
63328@@ -380,7 +388,7 @@ int allow_signal(int sig)
63329 * know it'll be handled, so that they don't get converted to
63330 * SIGKILL or just silently dropped.
63331 */
63332- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63333+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63334 recalc_sigpending();
63335 spin_unlock_irq(&current->sighand->siglock);
63336 return 0;
63337@@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
63338 vsnprintf(current->comm, sizeof(current->comm), name, args);
63339 va_end(args);
63340
63341+#ifdef CONFIG_GRKERNSEC
63342+ write_lock(&grsec_exec_file_lock);
63343+ if (current->exec_file) {
63344+ fput(current->exec_file);
63345+ current->exec_file = NULL;
63346+ }
63347+ write_unlock(&grsec_exec_file_lock);
63348+#endif
63349+
63350+ gr_set_kernel_label(current);
63351+
63352 /*
63353 * If we were started as result of loading a module, close all of the
63354 * user space pages. We don't need them, and if we didn't close them
63355@@ -893,6 +912,8 @@ NORET_TYPE void do_exit(long code)
63356 struct task_struct *tsk = current;
63357 int group_dead;
63358
63359+ set_fs(USER_DS);
63360+
63361 profile_task_exit(tsk);
63362
63363 WARN_ON(blk_needs_flush_plug(tsk));
63364@@ -909,7 +930,6 @@ NORET_TYPE void do_exit(long code)
63365 * mm_release()->clear_child_tid() from writing to a user-controlled
63366 * kernel address.
63367 */
63368- set_fs(USER_DS);
63369
63370 ptrace_event(PTRACE_EVENT_EXIT, code);
63371
63372@@ -971,6 +991,9 @@ NORET_TYPE void do_exit(long code)
63373 tsk->exit_code = code;
63374 taskstats_exit(tsk, group_dead);
63375
63376+ gr_acl_handle_psacct(tsk, code);
63377+ gr_acl_handle_exit();
63378+
63379 exit_mm(tsk);
63380
63381 if (group_dead)
63382diff --git a/kernel/fork.c b/kernel/fork.c
63383index da4a6a1..c04943c 100644
63384--- a/kernel/fork.c
63385+++ b/kernel/fork.c
63386@@ -280,7 +280,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
63387 *stackend = STACK_END_MAGIC; /* for overflow detection */
63388
63389 #ifdef CONFIG_CC_STACKPROTECTOR
63390- tsk->stack_canary = get_random_int();
63391+ tsk->stack_canary = pax_get_random_long();
63392 #endif
63393
63394 /*
63395@@ -304,13 +304,77 @@ out:
63396 }
63397
63398 #ifdef CONFIG_MMU
63399+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
63400+{
63401+ struct vm_area_struct *tmp;
63402+ unsigned long charge;
63403+ struct mempolicy *pol;
63404+ struct file *file;
63405+
63406+ charge = 0;
63407+ if (mpnt->vm_flags & VM_ACCOUNT) {
63408+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63409+ if (security_vm_enough_memory(len))
63410+ goto fail_nomem;
63411+ charge = len;
63412+ }
63413+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63414+ if (!tmp)
63415+ goto fail_nomem;
63416+ *tmp = *mpnt;
63417+ tmp->vm_mm = mm;
63418+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
63419+ pol = mpol_dup(vma_policy(mpnt));
63420+ if (IS_ERR(pol))
63421+ goto fail_nomem_policy;
63422+ vma_set_policy(tmp, pol);
63423+ if (anon_vma_fork(tmp, mpnt))
63424+ goto fail_nomem_anon_vma_fork;
63425+ tmp->vm_flags &= ~VM_LOCKED;
63426+ tmp->vm_next = tmp->vm_prev = NULL;
63427+ tmp->vm_mirror = NULL;
63428+ file = tmp->vm_file;
63429+ if (file) {
63430+ struct inode *inode = file->f_path.dentry->d_inode;
63431+ struct address_space *mapping = file->f_mapping;
63432+
63433+ get_file(file);
63434+ if (tmp->vm_flags & VM_DENYWRITE)
63435+ atomic_dec(&inode->i_writecount);
63436+ mutex_lock(&mapping->i_mmap_mutex);
63437+ if (tmp->vm_flags & VM_SHARED)
63438+ mapping->i_mmap_writable++;
63439+ flush_dcache_mmap_lock(mapping);
63440+ /* insert tmp into the share list, just after mpnt */
63441+ vma_prio_tree_add(tmp, mpnt);
63442+ flush_dcache_mmap_unlock(mapping);
63443+ mutex_unlock(&mapping->i_mmap_mutex);
63444+ }
63445+
63446+ /*
63447+ * Clear hugetlb-related page reserves for children. This only
63448+ * affects MAP_PRIVATE mappings. Faults generated by the child
63449+ * are not guaranteed to succeed, even if read-only
63450+ */
63451+ if (is_vm_hugetlb_page(tmp))
63452+ reset_vma_resv_huge_pages(tmp);
63453+
63454+ return tmp;
63455+
63456+fail_nomem_anon_vma_fork:
63457+ mpol_put(pol);
63458+fail_nomem_policy:
63459+ kmem_cache_free(vm_area_cachep, tmp);
63460+fail_nomem:
63461+ vm_unacct_memory(charge);
63462+ return NULL;
63463+}
63464+
63465 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63466 {
63467 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
63468 struct rb_node **rb_link, *rb_parent;
63469 int retval;
63470- unsigned long charge;
63471- struct mempolicy *pol;
63472
63473 down_write(&oldmm->mmap_sem);
63474 flush_cache_dup_mm(oldmm);
63475@@ -322,8 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63476 mm->locked_vm = 0;
63477 mm->mmap = NULL;
63478 mm->mmap_cache = NULL;
63479- mm->free_area_cache = oldmm->mmap_base;
63480- mm->cached_hole_size = ~0UL;
63481+ mm->free_area_cache = oldmm->free_area_cache;
63482+ mm->cached_hole_size = oldmm->cached_hole_size;
63483 mm->map_count = 0;
63484 cpumask_clear(mm_cpumask(mm));
63485 mm->mm_rb = RB_ROOT;
63486@@ -339,8 +403,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63487
63488 prev = NULL;
63489 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
63490- struct file *file;
63491-
63492 if (mpnt->vm_flags & VM_DONTCOPY) {
63493 long pages = vma_pages(mpnt);
63494 mm->total_vm -= pages;
63495@@ -348,53 +410,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63496 -pages);
63497 continue;
63498 }
63499- charge = 0;
63500- if (mpnt->vm_flags & VM_ACCOUNT) {
63501- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
63502- if (security_vm_enough_memory(len))
63503- goto fail_nomem;
63504- charge = len;
63505+ tmp = dup_vma(mm, mpnt);
63506+ if (!tmp) {
63507+ retval = -ENOMEM;
63508+ goto out;
63509 }
63510- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
63511- if (!tmp)
63512- goto fail_nomem;
63513- *tmp = *mpnt;
63514- INIT_LIST_HEAD(&tmp->anon_vma_chain);
63515- pol = mpol_dup(vma_policy(mpnt));
63516- retval = PTR_ERR(pol);
63517- if (IS_ERR(pol))
63518- goto fail_nomem_policy;
63519- vma_set_policy(tmp, pol);
63520- tmp->vm_mm = mm;
63521- if (anon_vma_fork(tmp, mpnt))
63522- goto fail_nomem_anon_vma_fork;
63523- tmp->vm_flags &= ~VM_LOCKED;
63524- tmp->vm_next = tmp->vm_prev = NULL;
63525- file = tmp->vm_file;
63526- if (file) {
63527- struct inode *inode = file->f_path.dentry->d_inode;
63528- struct address_space *mapping = file->f_mapping;
63529-
63530- get_file(file);
63531- if (tmp->vm_flags & VM_DENYWRITE)
63532- atomic_dec(&inode->i_writecount);
63533- mutex_lock(&mapping->i_mmap_mutex);
63534- if (tmp->vm_flags & VM_SHARED)
63535- mapping->i_mmap_writable++;
63536- flush_dcache_mmap_lock(mapping);
63537- /* insert tmp into the share list, just after mpnt */
63538- vma_prio_tree_add(tmp, mpnt);
63539- flush_dcache_mmap_unlock(mapping);
63540- mutex_unlock(&mapping->i_mmap_mutex);
63541- }
63542-
63543- /*
63544- * Clear hugetlb-related page reserves for children. This only
63545- * affects MAP_PRIVATE mappings. Faults generated by the child
63546- * are not guaranteed to succeed, even if read-only
63547- */
63548- if (is_vm_hugetlb_page(tmp))
63549- reset_vma_resv_huge_pages(tmp);
63550
63551 /*
63552 * Link in the new vma and copy the page table entries.
63553@@ -417,6 +437,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
63554 if (retval)
63555 goto out;
63556 }
63557+
63558+#ifdef CONFIG_PAX_SEGMEXEC
63559+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63560+ struct vm_area_struct *mpnt_m;
63561+
63562+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63563+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63564+
63565+ if (!mpnt->vm_mirror)
63566+ continue;
63567+
63568+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63569+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63570+ mpnt->vm_mirror = mpnt_m;
63571+ } else {
63572+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63573+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63574+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63575+ mpnt->vm_mirror->vm_mirror = mpnt;
63576+ }
63577+ }
63578+ BUG_ON(mpnt_m);
63579+ }
63580+#endif
63581+
63582 /* a new mm has just been created */
63583 arch_dup_mmap(oldmm, mm);
63584 retval = 0;
63585@@ -425,14 +470,6 @@ out:
63586 flush_tlb_mm(oldmm);
63587 up_write(&oldmm->mmap_sem);
63588 return retval;
63589-fail_nomem_anon_vma_fork:
63590- mpol_put(pol);
63591-fail_nomem_policy:
63592- kmem_cache_free(vm_area_cachep, tmp);
63593-fail_nomem:
63594- retval = -ENOMEM;
63595- vm_unacct_memory(charge);
63596- goto out;
63597 }
63598
63599 static inline int mm_alloc_pgd(struct mm_struct *mm)
63600@@ -829,13 +866,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
63601 spin_unlock(&fs->lock);
63602 return -EAGAIN;
63603 }
63604- fs->users++;
63605+ atomic_inc(&fs->users);
63606 spin_unlock(&fs->lock);
63607 return 0;
63608 }
63609 tsk->fs = copy_fs_struct(fs);
63610 if (!tsk->fs)
63611 return -ENOMEM;
63612+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63613 return 0;
63614 }
63615
63616@@ -1097,6 +1135,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63617 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63618 #endif
63619 retval = -EAGAIN;
63620+
63621+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63622+
63623 if (atomic_read(&p->real_cred->user->processes) >=
63624 task_rlimit(p, RLIMIT_NPROC)) {
63625 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63626@@ -1256,6 +1297,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
63627 if (clone_flags & CLONE_THREAD)
63628 p->tgid = current->tgid;
63629
63630+ gr_copy_label(p);
63631+
63632 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63633 /*
63634 * Clear TID on mm_release()?
63635@@ -1418,6 +1461,8 @@ bad_fork_cleanup_count:
63636 bad_fork_free:
63637 free_task(p);
63638 fork_out:
63639+ gr_log_forkfail(retval);
63640+
63641 return ERR_PTR(retval);
63642 }
63643
63644@@ -1518,6 +1563,8 @@ long do_fork(unsigned long clone_flags,
63645 if (clone_flags & CLONE_PARENT_SETTID)
63646 put_user(nr, parent_tidptr);
63647
63648+ gr_handle_brute_check();
63649+
63650 if (clone_flags & CLONE_VFORK) {
63651 p->vfork_done = &vfork;
63652 init_completion(&vfork);
63653@@ -1627,7 +1674,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
63654 return 0;
63655
63656 /* don't need lock here; in the worst case we'll do useless copy */
63657- if (fs->users == 1)
63658+ if (atomic_read(&fs->users) == 1)
63659 return 0;
63660
63661 *new_fsp = copy_fs_struct(fs);
63662@@ -1716,7 +1763,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
63663 fs = current->fs;
63664 spin_lock(&fs->lock);
63665 current->fs = new_fs;
63666- if (--fs->users)
63667+ gr_set_chroot_entries(current, &current->fs->root);
63668+ if (atomic_dec_return(&fs->users))
63669 new_fs = NULL;
63670 else
63671 new_fs = fs;
63672diff --git a/kernel/futex.c b/kernel/futex.c
63673index 1614be2..37abc7e 100644
63674--- a/kernel/futex.c
63675+++ b/kernel/futex.c
63676@@ -54,6 +54,7 @@
63677 #include <linux/mount.h>
63678 #include <linux/pagemap.h>
63679 #include <linux/syscalls.h>
63680+#include <linux/ptrace.h>
63681 #include <linux/signal.h>
63682 #include <linux/export.h>
63683 #include <linux/magic.h>
63684@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
63685 struct page *page, *page_head;
63686 int err, ro = 0;
63687
63688+#ifdef CONFIG_PAX_SEGMEXEC
63689+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63690+ return -EFAULT;
63691+#endif
63692+
63693 /*
63694 * The futex address must be "naturally" aligned.
63695 */
63696@@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
63697 if (!p)
63698 goto err_unlock;
63699 ret = -EPERM;
63700+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63701+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63702+ goto err_unlock;
63703+#endif
63704 pcred = __task_cred(p);
63705 /* If victim is in different user_ns, then uids are not
63706 comparable, so we must have CAP_SYS_PTRACE */
63707@@ -2724,6 +2734,7 @@ static int __init futex_init(void)
63708 {
63709 u32 curval;
63710 int i;
63711+ mm_segment_t oldfs;
63712
63713 /*
63714 * This will fail and we want it. Some arch implementations do
63715@@ -2735,8 +2746,11 @@ static int __init futex_init(void)
63716 * implementation, the non-functional ones will return
63717 * -ENOSYS.
63718 */
63719+ oldfs = get_fs();
63720+ set_fs(USER_DS);
63721 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
63722 futex_cmpxchg_enabled = 1;
63723+ set_fs(oldfs);
63724
63725 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
63726 plist_head_init(&futex_queues[i].chain);
63727diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
63728index 5f9e689..582d46d 100644
63729--- a/kernel/futex_compat.c
63730+++ b/kernel/futex_compat.c
63731@@ -10,6 +10,7 @@
63732 #include <linux/compat.h>
63733 #include <linux/nsproxy.h>
63734 #include <linux/futex.h>
63735+#include <linux/ptrace.h>
63736
63737 #include <asm/uaccess.h>
63738
63739@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63740 {
63741 struct compat_robust_list_head __user *head;
63742 unsigned long ret;
63743- const struct cred *cred = current_cred(), *pcred;
63744+ const struct cred *cred = current_cred();
63745+ const struct cred *pcred;
63746
63747 if (!futex_cmpxchg_enabled)
63748 return -ENOSYS;
63749@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
63750 if (!p)
63751 goto err_unlock;
63752 ret = -EPERM;
63753+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63754+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63755+ goto err_unlock;
63756+#endif
63757 pcred = __task_cred(p);
63758 /* If victim is in different user_ns, then uids are not
63759 comparable, so we must have CAP_SYS_PTRACE */
63760diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
63761index 9b22d03..6295b62 100644
63762--- a/kernel/gcov/base.c
63763+++ b/kernel/gcov/base.c
63764@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63765 }
63766
63767 #ifdef CONFIG_MODULES
63768-static inline int within(void *addr, void *start, unsigned long size)
63769-{
63770- return ((addr >= start) && (addr < start + size));
63771-}
63772-
63773 /* Update list and generate events when modules are unloaded. */
63774 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63775 void *data)
63776@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63777 prev = NULL;
63778 /* Remove entries located in module from linked list. */
63779 for (info = gcov_info_head; info; info = info->next) {
63780- if (within(info, mod->module_core, mod->core_size)) {
63781+ if (within_module_core_rw((unsigned long)info, mod)) {
63782 if (prev)
63783 prev->next = info->next;
63784 else
63785diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
63786index ae34bf5..4e2f3d0 100644
63787--- a/kernel/hrtimer.c
63788+++ b/kernel/hrtimer.c
63789@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
63790 local_irq_restore(flags);
63791 }
63792
63793-static void run_hrtimer_softirq(struct softirq_action *h)
63794+static void run_hrtimer_softirq(void)
63795 {
63796 hrtimer_peek_ahead_timers();
63797 }
63798diff --git a/kernel/jump_label.c b/kernel/jump_label.c
63799index 66ff710..05a5128 100644
63800--- a/kernel/jump_label.c
63801+++ b/kernel/jump_label.c
63802@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
63803
63804 size = (((unsigned long)stop - (unsigned long)start)
63805 / sizeof(struct jump_entry));
63806+ pax_open_kernel();
63807 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
63808+ pax_close_kernel();
63809 }
63810
63811 static void jump_label_update(struct jump_label_key *key, int enable);
63812@@ -303,10 +305,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
63813 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
63814 struct jump_entry *iter;
63815
63816+ pax_open_kernel();
63817 for (iter = iter_start; iter < iter_stop; iter++) {
63818 if (within_module_init(iter->code, mod))
63819 iter->code = 0;
63820 }
63821+ pax_close_kernel();
63822 }
63823
63824 static int
63825diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
63826index 079f1d3..a407562 100644
63827--- a/kernel/kallsyms.c
63828+++ b/kernel/kallsyms.c
63829@@ -11,6 +11,9 @@
63830 * Changed the compression method from stem compression to "table lookup"
63831 * compression (see scripts/kallsyms.c for a more complete description)
63832 */
63833+#ifdef CONFIG_GRKERNSEC_HIDESYM
63834+#define __INCLUDED_BY_HIDESYM 1
63835+#endif
63836 #include <linux/kallsyms.h>
63837 #include <linux/module.h>
63838 #include <linux/init.h>
63839@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
63840
63841 static inline int is_kernel_inittext(unsigned long addr)
63842 {
63843+ if (system_state != SYSTEM_BOOTING)
63844+ return 0;
63845+
63846 if (addr >= (unsigned long)_sinittext
63847 && addr <= (unsigned long)_einittext)
63848 return 1;
63849 return 0;
63850 }
63851
63852+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63853+#ifdef CONFIG_MODULES
63854+static inline int is_module_text(unsigned long addr)
63855+{
63856+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63857+ return 1;
63858+
63859+ addr = ktla_ktva(addr);
63860+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63861+}
63862+#else
63863+static inline int is_module_text(unsigned long addr)
63864+{
63865+ return 0;
63866+}
63867+#endif
63868+#endif
63869+
63870 static inline int is_kernel_text(unsigned long addr)
63871 {
63872 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63873@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
63874
63875 static inline int is_kernel(unsigned long addr)
63876 {
63877+
63878+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63879+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63880+ return 1;
63881+
63882+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63883+#else
63884 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63885+#endif
63886+
63887 return 1;
63888 return in_gate_area_no_mm(addr);
63889 }
63890
63891 static int is_ksym_addr(unsigned long addr)
63892 {
63893+
63894+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63895+ if (is_module_text(addr))
63896+ return 0;
63897+#endif
63898+
63899 if (all_var)
63900 return is_kernel(addr);
63901
63902@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
63903
63904 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63905 {
63906- iter->name[0] = '\0';
63907 iter->nameoff = get_symbol_offset(new_pos);
63908 iter->pos = new_pos;
63909 }
63910@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
63911 {
63912 struct kallsym_iter *iter = m->private;
63913
63914+#ifdef CONFIG_GRKERNSEC_HIDESYM
63915+ if (current_uid())
63916+ return 0;
63917+#endif
63918+
63919 /* Some debugging symbols have no name. Ignore them. */
63920 if (!iter->name[0])
63921 return 0;
63922@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
63923 struct kallsym_iter *iter;
63924 int ret;
63925
63926- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63927+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63928 if (!iter)
63929 return -ENOMEM;
63930 reset_iter(iter, 0);
63931diff --git a/kernel/kexec.c b/kernel/kexec.c
63932index dc7bc08..4601964 100644
63933--- a/kernel/kexec.c
63934+++ b/kernel/kexec.c
63935@@ -1048,7 +1048,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
63936 unsigned long flags)
63937 {
63938 struct compat_kexec_segment in;
63939- struct kexec_segment out, __user *ksegments;
63940+ struct kexec_segment out;
63941+ struct kexec_segment __user *ksegments;
63942 unsigned long i, result;
63943
63944 /* Don't allow clients that don't understand the native
63945diff --git a/kernel/kmod.c b/kernel/kmod.c
63946index a4bea97..7a1ae9a 100644
63947--- a/kernel/kmod.c
63948+++ b/kernel/kmod.c
63949@@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
63950 * If module auto-loading support is disabled then this function
63951 * becomes a no-operation.
63952 */
63953-int __request_module(bool wait, const char *fmt, ...)
63954+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63955 {
63956- va_list args;
63957 char module_name[MODULE_NAME_LEN];
63958 unsigned int max_modprobes;
63959 int ret;
63960- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63961+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63962 static char *envp[] = { "HOME=/",
63963 "TERM=linux",
63964 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63965@@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
63966 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
63967 static int kmod_loop_msg;
63968
63969- va_start(args, fmt);
63970- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63971- va_end(args);
63972+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63973 if (ret >= MODULE_NAME_LEN)
63974 return -ENAMETOOLONG;
63975
63976@@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
63977 if (ret)
63978 return ret;
63979
63980+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63981+ if (!current_uid()) {
63982+ /* hack to workaround consolekit/udisks stupidity */
63983+ read_lock(&tasklist_lock);
63984+ if (!strcmp(current->comm, "mount") &&
63985+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63986+ read_unlock(&tasklist_lock);
63987+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63988+ return -EPERM;
63989+ }
63990+ read_unlock(&tasklist_lock);
63991+ }
63992+#endif
63993+
63994 /* If modprobe needs a service that is in a module, we get a recursive
63995 * loop. Limit the number of running kmod threads to max_threads/2 or
63996 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63997@@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
63998 atomic_dec(&kmod_concurrent);
63999 return ret;
64000 }
64001+
64002+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
64003+{
64004+ va_list args;
64005+ int ret;
64006+
64007+ va_start(args, fmt);
64008+ ret = ____request_module(wait, module_param, fmt, args);
64009+ va_end(args);
64010+
64011+ return ret;
64012+}
64013+
64014+int __request_module(bool wait, const char *fmt, ...)
64015+{
64016+ va_list args;
64017+ int ret;
64018+
64019+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64020+ if (current_uid()) {
64021+ char module_param[MODULE_NAME_LEN];
64022+
64023+ memset(module_param, 0, sizeof(module_param));
64024+
64025+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
64026+
64027+ va_start(args, fmt);
64028+ ret = ____request_module(wait, module_param, fmt, args);
64029+ va_end(args);
64030+
64031+ return ret;
64032+ }
64033+#endif
64034+
64035+ va_start(args, fmt);
64036+ ret = ____request_module(wait, NULL, fmt, args);
64037+ va_end(args);
64038+
64039+ return ret;
64040+}
64041+
64042 EXPORT_SYMBOL(__request_module);
64043 #endif /* CONFIG_MODULES */
64044
64045@@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
64046 *
64047 * Thus the __user pointer cast is valid here.
64048 */
64049- sys_wait4(pid, (int __user *)&ret, 0, NULL);
64050+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
64051
64052 /*
64053 * If ret is 0, either ____call_usermodehelper failed and the
64054diff --git a/kernel/kprobes.c b/kernel/kprobes.c
64055index 52fd049..3def6a8 100644
64056--- a/kernel/kprobes.c
64057+++ b/kernel/kprobes.c
64058@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
64059 * kernel image and loaded module images reside. This is required
64060 * so x86_64 can correctly handle the %rip-relative fixups.
64061 */
64062- kip->insns = module_alloc(PAGE_SIZE);
64063+ kip->insns = module_alloc_exec(PAGE_SIZE);
64064 if (!kip->insns) {
64065 kfree(kip);
64066 return NULL;
64067@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
64068 */
64069 if (!list_is_singular(&kip->list)) {
64070 list_del(&kip->list);
64071- module_free(NULL, kip->insns);
64072+ module_free_exec(NULL, kip->insns);
64073 kfree(kip);
64074 }
64075 return 1;
64076@@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
64077 {
64078 int i, err = 0;
64079 unsigned long offset = 0, size = 0;
64080- char *modname, namebuf[128];
64081+ char *modname, namebuf[KSYM_NAME_LEN];
64082 const char *symbol_name;
64083 void *addr;
64084 struct kprobe_blackpoint *kb;
64085@@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
64086 const char *sym = NULL;
64087 unsigned int i = *(loff_t *) v;
64088 unsigned long offset = 0;
64089- char *modname, namebuf[128];
64090+ char *modname, namebuf[KSYM_NAME_LEN];
64091
64092 head = &kprobe_table[i];
64093 preempt_disable();
64094diff --git a/kernel/lockdep.c b/kernel/lockdep.c
64095index b2e08c9..01d8049 100644
64096--- a/kernel/lockdep.c
64097+++ b/kernel/lockdep.c
64098@@ -592,6 +592,10 @@ static int static_obj(void *obj)
64099 end = (unsigned long) &_end,
64100 addr = (unsigned long) obj;
64101
64102+#ifdef CONFIG_PAX_KERNEXEC
64103+ start = ktla_ktva(start);
64104+#endif
64105+
64106 /*
64107 * static variable?
64108 */
64109@@ -731,6 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
64110 if (!static_obj(lock->key)) {
64111 debug_locks_off();
64112 printk("INFO: trying to register non-static key.\n");
64113+ printk("lock:%pS key:%pS.\n", lock, lock->key);
64114 printk("the code is fine but needs lockdep annotation.\n");
64115 printk("turning off the locking correctness validator.\n");
64116 dump_stack();
64117@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
64118 if (!class)
64119 return 0;
64120 }
64121- atomic_inc((atomic_t *)&class->ops);
64122+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
64123 if (very_verbose(class)) {
64124 printk("\nacquire class [%p] %s", class->key, class->name);
64125 if (class->name_version > 1)
64126diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
64127index 91c32a0..b2c71c5 100644
64128--- a/kernel/lockdep_proc.c
64129+++ b/kernel/lockdep_proc.c
64130@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
64131
64132 static void print_name(struct seq_file *m, struct lock_class *class)
64133 {
64134- char str[128];
64135+ char str[KSYM_NAME_LEN];
64136 const char *name = class->name;
64137
64138 if (!name) {
64139diff --git a/kernel/module.c b/kernel/module.c
64140index 178333c..04e3408 100644
64141--- a/kernel/module.c
64142+++ b/kernel/module.c
64143@@ -58,6 +58,7 @@
64144 #include <linux/jump_label.h>
64145 #include <linux/pfn.h>
64146 #include <linux/bsearch.h>
64147+#include <linux/grsecurity.h>
64148
64149 #define CREATE_TRACE_POINTS
64150 #include <trace/events/module.h>
64151@@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64152
64153 /* Bounds of module allocation, for speeding __module_address.
64154 * Protected by module_mutex. */
64155-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64156+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64157+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64158
64159 int register_module_notifier(struct notifier_block * nb)
64160 {
64161@@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64162 return true;
64163
64164 list_for_each_entry_rcu(mod, &modules, list) {
64165- struct symsearch arr[] = {
64166+ struct symsearch modarr[] = {
64167 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64168 NOT_GPL_ONLY, false },
64169 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64170@@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
64171 #endif
64172 };
64173
64174- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64175+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64176 return true;
64177 }
64178 return false;
64179@@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
64180 static int percpu_modalloc(struct module *mod,
64181 unsigned long size, unsigned long align)
64182 {
64183- if (align > PAGE_SIZE) {
64184+ if (align-1 >= PAGE_SIZE) {
64185 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64186 mod->name, align, PAGE_SIZE);
64187 align = PAGE_SIZE;
64188@@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
64189 */
64190 #ifdef CONFIG_SYSFS
64191
64192-#ifdef CONFIG_KALLSYMS
64193+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64194 static inline bool sect_empty(const Elf_Shdr *sect)
64195 {
64196 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
64197@@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
64198
64199 static void unset_module_core_ro_nx(struct module *mod)
64200 {
64201- set_page_attributes(mod->module_core + mod->core_text_size,
64202- mod->module_core + mod->core_size,
64203+ set_page_attributes(mod->module_core_rw,
64204+ mod->module_core_rw + mod->core_size_rw,
64205 set_memory_x);
64206- set_page_attributes(mod->module_core,
64207- mod->module_core + mod->core_ro_size,
64208+ set_page_attributes(mod->module_core_rx,
64209+ mod->module_core_rx + mod->core_size_rx,
64210 set_memory_rw);
64211 }
64212
64213 static void unset_module_init_ro_nx(struct module *mod)
64214 {
64215- set_page_attributes(mod->module_init + mod->init_text_size,
64216- mod->module_init + mod->init_size,
64217+ set_page_attributes(mod->module_init_rw,
64218+ mod->module_init_rw + mod->init_size_rw,
64219 set_memory_x);
64220- set_page_attributes(mod->module_init,
64221- mod->module_init + mod->init_ro_size,
64222+ set_page_attributes(mod->module_init_rx,
64223+ mod->module_init_rx + mod->init_size_rx,
64224 set_memory_rw);
64225 }
64226
64227@@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
64228
64229 mutex_lock(&module_mutex);
64230 list_for_each_entry_rcu(mod, &modules, list) {
64231- if ((mod->module_core) && (mod->core_text_size)) {
64232- set_page_attributes(mod->module_core,
64233- mod->module_core + mod->core_text_size,
64234+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64235+ set_page_attributes(mod->module_core_rx,
64236+ mod->module_core_rx + mod->core_size_rx,
64237 set_memory_rw);
64238 }
64239- if ((mod->module_init) && (mod->init_text_size)) {
64240- set_page_attributes(mod->module_init,
64241- mod->module_init + mod->init_text_size,
64242+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64243+ set_page_attributes(mod->module_init_rx,
64244+ mod->module_init_rx + mod->init_size_rx,
64245 set_memory_rw);
64246 }
64247 }
64248@@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
64249
64250 mutex_lock(&module_mutex);
64251 list_for_each_entry_rcu(mod, &modules, list) {
64252- if ((mod->module_core) && (mod->core_text_size)) {
64253- set_page_attributes(mod->module_core,
64254- mod->module_core + mod->core_text_size,
64255+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
64256+ set_page_attributes(mod->module_core_rx,
64257+ mod->module_core_rx + mod->core_size_rx,
64258 set_memory_ro);
64259 }
64260- if ((mod->module_init) && (mod->init_text_size)) {
64261- set_page_attributes(mod->module_init,
64262- mod->module_init + mod->init_text_size,
64263+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
64264+ set_page_attributes(mod->module_init_rx,
64265+ mod->module_init_rx + mod->init_size_rx,
64266 set_memory_ro);
64267 }
64268 }
64269@@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
64270
64271 /* This may be NULL, but that's OK */
64272 unset_module_init_ro_nx(mod);
64273- module_free(mod, mod->module_init);
64274+ module_free(mod, mod->module_init_rw);
64275+ module_free_exec(mod, mod->module_init_rx);
64276 kfree(mod->args);
64277 percpu_modfree(mod);
64278
64279 /* Free lock-classes: */
64280- lockdep_free_key_range(mod->module_core, mod->core_size);
64281+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64282+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64283
64284 /* Finally, free the core (containing the module structure) */
64285 unset_module_core_ro_nx(mod);
64286- module_free(mod, mod->module_core);
64287+ module_free_exec(mod, mod->module_core_rx);
64288+ module_free(mod, mod->module_core_rw);
64289
64290 #ifdef CONFIG_MPU
64291 update_protections(current->mm);
64292@@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64293 unsigned int i;
64294 int ret = 0;
64295 const struct kernel_symbol *ksym;
64296+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64297+ int is_fs_load = 0;
64298+ int register_filesystem_found = 0;
64299+ char *p;
64300+
64301+ p = strstr(mod->args, "grsec_modharden_fs");
64302+ if (p) {
64303+ char *endptr = p + strlen("grsec_modharden_fs");
64304+ /* copy \0 as well */
64305+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64306+ is_fs_load = 1;
64307+ }
64308+#endif
64309
64310 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
64311 const char *name = info->strtab + sym[i].st_name;
64312
64313+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64314+ /* it's a real shame this will never get ripped and copied
64315+ upstream! ;(
64316+ */
64317+ if (is_fs_load && !strcmp(name, "register_filesystem"))
64318+ register_filesystem_found = 1;
64319+#endif
64320+
64321 switch (sym[i].st_shndx) {
64322 case SHN_COMMON:
64323 /* We compiled with -fno-common. These are not
64324@@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64325 ksym = resolve_symbol_wait(mod, info, name);
64326 /* Ok if resolved. */
64327 if (ksym && !IS_ERR(ksym)) {
64328+ pax_open_kernel();
64329 sym[i].st_value = ksym->value;
64330+ pax_close_kernel();
64331 break;
64332 }
64333
64334@@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
64335 secbase = (unsigned long)mod_percpu(mod);
64336 else
64337 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
64338+ pax_open_kernel();
64339 sym[i].st_value += secbase;
64340+ pax_close_kernel();
64341 break;
64342 }
64343 }
64344
64345+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64346+ if (is_fs_load && !register_filesystem_found) {
64347+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64348+ ret = -EPERM;
64349+ }
64350+#endif
64351+
64352 return ret;
64353 }
64354
64355@@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
64356 || s->sh_entsize != ~0UL
64357 || strstarts(sname, ".init"))
64358 continue;
64359- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64360+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64361+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64362+ else
64363+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64364 DEBUGP("\t%s\n", name);
64365 }
64366- switch (m) {
64367- case 0: /* executable */
64368- mod->core_size = debug_align(mod->core_size);
64369- mod->core_text_size = mod->core_size;
64370- break;
64371- case 1: /* RO: text and ro-data */
64372- mod->core_size = debug_align(mod->core_size);
64373- mod->core_ro_size = mod->core_size;
64374- break;
64375- case 3: /* whole core */
64376- mod->core_size = debug_align(mod->core_size);
64377- break;
64378- }
64379 }
64380
64381 DEBUGP("Init section allocation order:\n");
64382@@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
64383 || s->sh_entsize != ~0UL
64384 || !strstarts(sname, ".init"))
64385 continue;
64386- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64387- | INIT_OFFSET_MASK);
64388+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64389+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64390+ else
64391+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64392+ s->sh_entsize |= INIT_OFFSET_MASK;
64393 DEBUGP("\t%s\n", sname);
64394 }
64395- switch (m) {
64396- case 0: /* executable */
64397- mod->init_size = debug_align(mod->init_size);
64398- mod->init_text_size = mod->init_size;
64399- break;
64400- case 1: /* RO: text and ro-data */
64401- mod->init_size = debug_align(mod->init_size);
64402- mod->init_ro_size = mod->init_size;
64403- break;
64404- case 3: /* whole init */
64405- mod->init_size = debug_align(mod->init_size);
64406- break;
64407- }
64408 }
64409 }
64410
64411@@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64412
64413 /* Put symbol section at end of init part of module. */
64414 symsect->sh_flags |= SHF_ALLOC;
64415- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64416+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64417 info->index.sym) | INIT_OFFSET_MASK;
64418 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
64419
64420@@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
64421 }
64422
64423 /* Append room for core symbols at end of core part. */
64424- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64425- mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
64426+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64427+ mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
64428
64429 /* Put string table section at end of init part of module. */
64430 strsect->sh_flags |= SHF_ALLOC;
64431- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64432+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64433 info->index.str) | INIT_OFFSET_MASK;
64434 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
64435
64436 /* Append room for core symbols' strings at end of core part. */
64437- info->stroffs = mod->core_size;
64438+ info->stroffs = mod->core_size_rx;
64439 __set_bit(0, info->strmap);
64440- mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
64441+ mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
64442 }
64443
64444 static void add_kallsyms(struct module *mod, const struct load_info *info)
64445@@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64446 /* Make sure we get permanent strtab: don't use info->strtab. */
64447 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
64448
64449+ pax_open_kernel();
64450+
64451 /* Set types up while we still have access to sections. */
64452 for (i = 0; i < mod->num_symtab; i++)
64453 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
64454
64455- mod->core_symtab = dst = mod->module_core + info->symoffs;
64456+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
64457 src = mod->symtab;
64458 *dst = *src;
64459 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64460@@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
64461 }
64462 mod->core_num_syms = ndst;
64463
64464- mod->core_strtab = s = mod->module_core + info->stroffs;
64465+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
64466 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
64467 if (test_bit(i, info->strmap))
64468 *++s = mod->strtab[i];
64469+
64470+ pax_close_kernel();
64471 }
64472 #else
64473 static inline void layout_symtab(struct module *mod, struct load_info *info)
64474@@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
64475 return size == 0 ? NULL : vmalloc_exec(size);
64476 }
64477
64478-static void *module_alloc_update_bounds(unsigned long size)
64479+static void *module_alloc_update_bounds_rw(unsigned long size)
64480 {
64481 void *ret = module_alloc(size);
64482
64483 if (ret) {
64484 mutex_lock(&module_mutex);
64485 /* Update module bounds. */
64486- if ((unsigned long)ret < module_addr_min)
64487- module_addr_min = (unsigned long)ret;
64488- if ((unsigned long)ret + size > module_addr_max)
64489- module_addr_max = (unsigned long)ret + size;
64490+ if ((unsigned long)ret < module_addr_min_rw)
64491+ module_addr_min_rw = (unsigned long)ret;
64492+ if ((unsigned long)ret + size > module_addr_max_rw)
64493+ module_addr_max_rw = (unsigned long)ret + size;
64494+ mutex_unlock(&module_mutex);
64495+ }
64496+ return ret;
64497+}
64498+
64499+static void *module_alloc_update_bounds_rx(unsigned long size)
64500+{
64501+ void *ret = module_alloc_exec(size);
64502+
64503+ if (ret) {
64504+ mutex_lock(&module_mutex);
64505+ /* Update module bounds. */
64506+ if ((unsigned long)ret < module_addr_min_rx)
64507+ module_addr_min_rx = (unsigned long)ret;
64508+ if ((unsigned long)ret + size > module_addr_max_rx)
64509+ module_addr_max_rx = (unsigned long)ret + size;
64510 mutex_unlock(&module_mutex);
64511 }
64512 return ret;
64513@@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
64514 static int check_modinfo(struct module *mod, struct load_info *info)
64515 {
64516 const char *modmagic = get_modinfo(info, "vermagic");
64517+ const char *license = get_modinfo(info, "license");
64518 int err;
64519
64520+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
64521+ if (!license || !license_is_gpl_compatible(license))
64522+ return -ENOEXEC;
64523+#endif
64524+
64525 /* This is allowed: modprobe --force will invalidate it. */
64526 if (!modmagic) {
64527 err = try_to_force_load(mod, "bad vermagic");
64528@@ -2498,7 +2541,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
64529 }
64530
64531 /* Set up license info based on the info section */
64532- set_license(mod, get_modinfo(info, "license"));
64533+ set_license(mod, license);
64534
64535 return 0;
64536 }
64537@@ -2592,7 +2635,7 @@ static int move_module(struct module *mod, struct load_info *info)
64538 void *ptr;
64539
64540 /* Do the allocs. */
64541- ptr = module_alloc_update_bounds(mod->core_size);
64542+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64543 /*
64544 * The pointer to this block is stored in the module structure
64545 * which is inside the block. Just mark it as not being a
64546@@ -2602,23 +2645,50 @@ static int move_module(struct module *mod, struct load_info *info)
64547 if (!ptr)
64548 return -ENOMEM;
64549
64550- memset(ptr, 0, mod->core_size);
64551- mod->module_core = ptr;
64552+ memset(ptr, 0, mod->core_size_rw);
64553+ mod->module_core_rw = ptr;
64554
64555- ptr = module_alloc_update_bounds(mod->init_size);
64556+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64557 /*
64558 * The pointer to this block is stored in the module structure
64559 * which is inside the block. This block doesn't need to be
64560 * scanned as it contains data and code that will be freed
64561 * after the module is initialized.
64562 */
64563- kmemleak_ignore(ptr);
64564- if (!ptr && mod->init_size) {
64565- module_free(mod, mod->module_core);
64566+ kmemleak_not_leak(ptr);
64567+ if (!ptr && mod->init_size_rw) {
64568+ module_free(mod, mod->module_core_rw);
64569 return -ENOMEM;
64570 }
64571- memset(ptr, 0, mod->init_size);
64572- mod->module_init = ptr;
64573+ memset(ptr, 0, mod->init_size_rw);
64574+ mod->module_init_rw = ptr;
64575+
64576+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64577+ kmemleak_not_leak(ptr);
64578+ if (!ptr) {
64579+ module_free(mod, mod->module_init_rw);
64580+ module_free(mod, mod->module_core_rw);
64581+ return -ENOMEM;
64582+ }
64583+
64584+ pax_open_kernel();
64585+ memset(ptr, 0, mod->core_size_rx);
64586+ pax_close_kernel();
64587+ mod->module_core_rx = ptr;
64588+
64589+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64590+ kmemleak_not_leak(ptr);
64591+ if (!ptr && mod->init_size_rx) {
64592+ module_free_exec(mod, mod->module_core_rx);
64593+ module_free(mod, mod->module_init_rw);
64594+ module_free(mod, mod->module_core_rw);
64595+ return -ENOMEM;
64596+ }
64597+
64598+ pax_open_kernel();
64599+ memset(ptr, 0, mod->init_size_rx);
64600+ pax_close_kernel();
64601+ mod->module_init_rx = ptr;
64602
64603 /* Transfer each section which specifies SHF_ALLOC */
64604 DEBUGP("final section addresses:\n");
64605@@ -2629,16 +2699,45 @@ static int move_module(struct module *mod, struct load_info *info)
64606 if (!(shdr->sh_flags & SHF_ALLOC))
64607 continue;
64608
64609- if (shdr->sh_entsize & INIT_OFFSET_MASK)
64610- dest = mod->module_init
64611- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64612- else
64613- dest = mod->module_core + shdr->sh_entsize;
64614+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
64615+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64616+ dest = mod->module_init_rw
64617+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64618+ else
64619+ dest = mod->module_init_rx
64620+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
64621+ } else {
64622+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
64623+ dest = mod->module_core_rw + shdr->sh_entsize;
64624+ else
64625+ dest = mod->module_core_rx + shdr->sh_entsize;
64626+ }
64627+
64628+ if (shdr->sh_type != SHT_NOBITS) {
64629+
64630+#ifdef CONFIG_PAX_KERNEXEC
64631+#ifdef CONFIG_X86_64
64632+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
64633+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64634+#endif
64635+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
64636+ pax_open_kernel();
64637+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64638+ pax_close_kernel();
64639+ } else
64640+#endif
64641
64642- if (shdr->sh_type != SHT_NOBITS)
64643 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
64644+ }
64645 /* Update sh_addr to point to copy in image. */
64646- shdr->sh_addr = (unsigned long)dest;
64647+
64648+#ifdef CONFIG_PAX_KERNEXEC
64649+ if (shdr->sh_flags & SHF_EXECINSTR)
64650+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
64651+ else
64652+#endif
64653+
64654+ shdr->sh_addr = (unsigned long)dest;
64655 DEBUGP("\t0x%lx %s\n",
64656 shdr->sh_addr, info->secstrings + shdr->sh_name);
64657 }
64658@@ -2689,12 +2788,12 @@ static void flush_module_icache(const struct module *mod)
64659 * Do it before processing of module parameters, so the module
64660 * can provide parameter accessor functions of its own.
64661 */
64662- if (mod->module_init)
64663- flush_icache_range((unsigned long)mod->module_init,
64664- (unsigned long)mod->module_init
64665- + mod->init_size);
64666- flush_icache_range((unsigned long)mod->module_core,
64667- (unsigned long)mod->module_core + mod->core_size);
64668+ if (mod->module_init_rx)
64669+ flush_icache_range((unsigned long)mod->module_init_rx,
64670+ (unsigned long)mod->module_init_rx
64671+ + mod->init_size_rx);
64672+ flush_icache_range((unsigned long)mod->module_core_rx,
64673+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64674
64675 set_fs(old_fs);
64676 }
64677@@ -2774,8 +2873,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
64678 {
64679 kfree(info->strmap);
64680 percpu_modfree(mod);
64681- module_free(mod, mod->module_init);
64682- module_free(mod, mod->module_core);
64683+ module_free_exec(mod, mod->module_init_rx);
64684+ module_free_exec(mod, mod->module_core_rx);
64685+ module_free(mod, mod->module_init_rw);
64686+ module_free(mod, mod->module_core_rw);
64687 }
64688
64689 int __weak module_finalize(const Elf_Ehdr *hdr,
64690@@ -2839,9 +2940,38 @@ static struct module *load_module(void __user *umod,
64691 if (err)
64692 goto free_unload;
64693
64694+ /* Now copy in args */
64695+ mod->args = strndup_user(uargs, ~0UL >> 1);
64696+ if (IS_ERR(mod->args)) {
64697+ err = PTR_ERR(mod->args);
64698+ goto free_unload;
64699+ }
64700+
64701 /* Set up MODINFO_ATTR fields */
64702 setup_modinfo(mod, &info);
64703
64704+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64705+ {
64706+ char *p, *p2;
64707+
64708+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64709+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64710+ err = -EPERM;
64711+ goto free_modinfo;
64712+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64713+ p += strlen("grsec_modharden_normal");
64714+ p2 = strstr(p, "_");
64715+ if (p2) {
64716+ *p2 = '\0';
64717+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64718+ *p2 = '_';
64719+ }
64720+ err = -EPERM;
64721+ goto free_modinfo;
64722+ }
64723+ }
64724+#endif
64725+
64726 /* Fix up syms, so that st_value is a pointer to location. */
64727 err = simplify_symbols(mod, &info);
64728 if (err < 0)
64729@@ -2857,13 +2987,6 @@ static struct module *load_module(void __user *umod,
64730
64731 flush_module_icache(mod);
64732
64733- /* Now copy in args */
64734- mod->args = strndup_user(uargs, ~0UL >> 1);
64735- if (IS_ERR(mod->args)) {
64736- err = PTR_ERR(mod->args);
64737- goto free_arch_cleanup;
64738- }
64739-
64740 /* Mark state as coming so strong_try_module_get() ignores us. */
64741 mod->state = MODULE_STATE_COMING;
64742
64743@@ -2921,11 +3044,10 @@ static struct module *load_module(void __user *umod,
64744 unlock:
64745 mutex_unlock(&module_mutex);
64746 synchronize_sched();
64747- kfree(mod->args);
64748- free_arch_cleanup:
64749 module_arch_cleanup(mod);
64750 free_modinfo:
64751 free_modinfo(mod);
64752+ kfree(mod->args);
64753 free_unload:
64754 module_unload_free(mod);
64755 free_module:
64756@@ -2966,16 +3088,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64757 MODULE_STATE_COMING, mod);
64758
64759 /* Set RO and NX regions for core */
64760- set_section_ro_nx(mod->module_core,
64761- mod->core_text_size,
64762- mod->core_ro_size,
64763- mod->core_size);
64764+ set_section_ro_nx(mod->module_core_rx,
64765+ mod->core_size_rx,
64766+ mod->core_size_rx,
64767+ mod->core_size_rx);
64768
64769 /* Set RO and NX regions for init */
64770- set_section_ro_nx(mod->module_init,
64771- mod->init_text_size,
64772- mod->init_ro_size,
64773- mod->init_size);
64774+ set_section_ro_nx(mod->module_init_rx,
64775+ mod->init_size_rx,
64776+ mod->init_size_rx,
64777+ mod->init_size_rx);
64778
64779 do_mod_ctors(mod);
64780 /* Start the module */
64781@@ -3021,11 +3143,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
64782 mod->strtab = mod->core_strtab;
64783 #endif
64784 unset_module_init_ro_nx(mod);
64785- module_free(mod, mod->module_init);
64786- mod->module_init = NULL;
64787- mod->init_size = 0;
64788- mod->init_ro_size = 0;
64789- mod->init_text_size = 0;
64790+ module_free(mod, mod->module_init_rw);
64791+ module_free_exec(mod, mod->module_init_rx);
64792+ mod->module_init_rw = NULL;
64793+ mod->module_init_rx = NULL;
64794+ mod->init_size_rw = 0;
64795+ mod->init_size_rx = 0;
64796 mutex_unlock(&module_mutex);
64797
64798 return 0;
64799@@ -3056,10 +3179,16 @@ static const char *get_ksymbol(struct module *mod,
64800 unsigned long nextval;
64801
64802 /* At worse, next value is at end of module */
64803- if (within_module_init(addr, mod))
64804- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64805+ if (within_module_init_rx(addr, mod))
64806+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64807+ else if (within_module_init_rw(addr, mod))
64808+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64809+ else if (within_module_core_rx(addr, mod))
64810+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64811+ else if (within_module_core_rw(addr, mod))
64812+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64813 else
64814- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64815+ return NULL;
64816
64817 /* Scan for closest preceding symbol, and next symbol. (ELF
64818 starts real symbols at 1). */
64819@@ -3307,7 +3436,7 @@ static int m_show(struct seq_file *m, void *p)
64820 char buf[8];
64821
64822 seq_printf(m, "%s %u",
64823- mod->name, mod->init_size + mod->core_size);
64824+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64825 print_unload_info(m, mod);
64826
64827 /* Informative for users. */
64828@@ -3316,7 +3445,7 @@ static int m_show(struct seq_file *m, void *p)
64829 mod->state == MODULE_STATE_COMING ? "Loading":
64830 "Live");
64831 /* Used by oprofile and other similar tools. */
64832- seq_printf(m, " 0x%pK", mod->module_core);
64833+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
64834
64835 /* Taints info */
64836 if (mod->taints)
64837@@ -3352,7 +3481,17 @@ static const struct file_operations proc_modules_operations = {
64838
64839 static int __init proc_modules_init(void)
64840 {
64841+#ifndef CONFIG_GRKERNSEC_HIDESYM
64842+#ifdef CONFIG_GRKERNSEC_PROC_USER
64843+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64844+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64845+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64846+#else
64847 proc_create("modules", 0, NULL, &proc_modules_operations);
64848+#endif
64849+#else
64850+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64851+#endif
64852 return 0;
64853 }
64854 module_init(proc_modules_init);
64855@@ -3411,12 +3550,12 @@ struct module *__module_address(unsigned long addr)
64856 {
64857 struct module *mod;
64858
64859- if (addr < module_addr_min || addr > module_addr_max)
64860+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64861+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64862 return NULL;
64863
64864 list_for_each_entry_rcu(mod, &modules, list)
64865- if (within_module_core(addr, mod)
64866- || within_module_init(addr, mod))
64867+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64868 return mod;
64869 return NULL;
64870 }
64871@@ -3450,11 +3589,20 @@ bool is_module_text_address(unsigned long addr)
64872 */
64873 struct module *__module_text_address(unsigned long addr)
64874 {
64875- struct module *mod = __module_address(addr);
64876+ struct module *mod;
64877+
64878+#ifdef CONFIG_X86_32
64879+ addr = ktla_ktva(addr);
64880+#endif
64881+
64882+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64883+ return NULL;
64884+
64885+ mod = __module_address(addr);
64886+
64887 if (mod) {
64888 /* Make sure it's within the text section. */
64889- if (!within(addr, mod->module_init, mod->init_text_size)
64890- && !within(addr, mod->module_core, mod->core_text_size))
64891+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64892 mod = NULL;
64893 }
64894 return mod;
64895diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
64896index 7e3443f..b2a1e6b 100644
64897--- a/kernel/mutex-debug.c
64898+++ b/kernel/mutex-debug.c
64899@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
64900 }
64901
64902 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64903- struct thread_info *ti)
64904+ struct task_struct *task)
64905 {
64906 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64907
64908 /* Mark the current thread as blocked on the lock: */
64909- ti->task->blocked_on = waiter;
64910+ task->blocked_on = waiter;
64911 }
64912
64913 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64914- struct thread_info *ti)
64915+ struct task_struct *task)
64916 {
64917 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64918- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64919- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64920- ti->task->blocked_on = NULL;
64921+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64922+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64923+ task->blocked_on = NULL;
64924
64925 list_del_init(&waiter->list);
64926 waiter->task = NULL;
64927diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
64928index 0799fd3..d06ae3b 100644
64929--- a/kernel/mutex-debug.h
64930+++ b/kernel/mutex-debug.h
64931@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
64932 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64933 extern void debug_mutex_add_waiter(struct mutex *lock,
64934 struct mutex_waiter *waiter,
64935- struct thread_info *ti);
64936+ struct task_struct *task);
64937 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64938- struct thread_info *ti);
64939+ struct task_struct *task);
64940 extern void debug_mutex_unlock(struct mutex *lock);
64941 extern void debug_mutex_init(struct mutex *lock, const char *name,
64942 struct lock_class_key *key);
64943diff --git a/kernel/mutex.c b/kernel/mutex.c
64944index 89096dd..f91ebc5 100644
64945--- a/kernel/mutex.c
64946+++ b/kernel/mutex.c
64947@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64948 spin_lock_mutex(&lock->wait_lock, flags);
64949
64950 debug_mutex_lock_common(lock, &waiter);
64951- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64952+ debug_mutex_add_waiter(lock, &waiter, task);
64953
64954 /* add waiting tasks to the end of the waitqueue (FIFO): */
64955 list_add_tail(&waiter.list, &lock->wait_list);
64956@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64957 * TASK_UNINTERRUPTIBLE case.)
64958 */
64959 if (unlikely(signal_pending_state(state, task))) {
64960- mutex_remove_waiter(lock, &waiter,
64961- task_thread_info(task));
64962+ mutex_remove_waiter(lock, &waiter, task);
64963 mutex_release(&lock->dep_map, 1, ip);
64964 spin_unlock_mutex(&lock->wait_lock, flags);
64965
64966@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
64967 done:
64968 lock_acquired(&lock->dep_map, ip);
64969 /* got the lock - rejoice! */
64970- mutex_remove_waiter(lock, &waiter, current_thread_info());
64971+ mutex_remove_waiter(lock, &waiter, task);
64972 mutex_set_owner(lock);
64973
64974 /* set it to 0 if there are no waiters left: */
64975diff --git a/kernel/padata.c b/kernel/padata.c
64976index b452599..5d68f4e 100644
64977--- a/kernel/padata.c
64978+++ b/kernel/padata.c
64979@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
64980 padata->pd = pd;
64981 padata->cb_cpu = cb_cpu;
64982
64983- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
64984- atomic_set(&pd->seq_nr, -1);
64985+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
64986+ atomic_set_unchecked(&pd->seq_nr, -1);
64987
64988- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
64989+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
64990
64991 target_cpu = padata_cpu_hash(padata);
64992 queue = per_cpu_ptr(pd->pqueue, target_cpu);
64993@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
64994 padata_init_pqueues(pd);
64995 padata_init_squeues(pd);
64996 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
64997- atomic_set(&pd->seq_nr, -1);
64998+ atomic_set_unchecked(&pd->seq_nr, -1);
64999 atomic_set(&pd->reorder_objects, 0);
65000 atomic_set(&pd->refcnt, 0);
65001 pd->pinst = pinst;
65002diff --git a/kernel/panic.c b/kernel/panic.c
65003index b2659360..5972a0f 100644
65004--- a/kernel/panic.c
65005+++ b/kernel/panic.c
65006@@ -78,7 +78,11 @@ NORET_TYPE void panic(const char * fmt, ...)
65007 va_end(args);
65008 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
65009 #ifdef CONFIG_DEBUG_BUGVERBOSE
65010- dump_stack();
65011+ /*
65012+ * Avoid nested stack-dumping if a panic occurs during oops processing
65013+ */
65014+ if (!oops_in_progress)
65015+ dump_stack();
65016 #endif
65017
65018 /*
65019@@ -373,7 +377,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
65020 const char *board;
65021
65022 printk(KERN_WARNING "------------[ cut here ]------------\n");
65023- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
65024+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
65025 board = dmi_get_system_info(DMI_PRODUCT_NAME);
65026 if (board)
65027 printk(KERN_WARNING "Hardware name: %s\n", board);
65028@@ -428,7 +432,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
65029 */
65030 void __stack_chk_fail(void)
65031 {
65032- panic("stack-protector: Kernel stack is corrupted in: %p\n",
65033+ dump_stack();
65034+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
65035 __builtin_return_address(0));
65036 }
65037 EXPORT_SYMBOL(__stack_chk_fail);
65038diff --git a/kernel/pid.c b/kernel/pid.c
65039index fa5f722..0c93e57 100644
65040--- a/kernel/pid.c
65041+++ b/kernel/pid.c
65042@@ -33,6 +33,7 @@
65043 #include <linux/rculist.h>
65044 #include <linux/bootmem.h>
65045 #include <linux/hash.h>
65046+#include <linux/security.h>
65047 #include <linux/pid_namespace.h>
65048 #include <linux/init_task.h>
65049 #include <linux/syscalls.h>
65050@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
65051
65052 int pid_max = PID_MAX_DEFAULT;
65053
65054-#define RESERVED_PIDS 300
65055+#define RESERVED_PIDS 500
65056
65057 int pid_max_min = RESERVED_PIDS + 1;
65058 int pid_max_max = PID_MAX_LIMIT;
65059@@ -418,10 +419,18 @@ EXPORT_SYMBOL(pid_task);
65060 */
65061 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65062 {
65063+ struct task_struct *task;
65064+
65065 rcu_lockdep_assert(rcu_read_lock_held(),
65066 "find_task_by_pid_ns() needs rcu_read_lock()"
65067 " protection");
65068- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65069+
65070+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65071+
65072+ if (gr_pid_is_chrooted(task))
65073+ return NULL;
65074+
65075+ return task;
65076 }
65077
65078 struct task_struct *find_task_by_vpid(pid_t vnr)
65079@@ -429,6 +438,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
65080 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65081 }
65082
65083+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65084+{
65085+ rcu_lockdep_assert(rcu_read_lock_held(),
65086+ "find_task_by_pid_ns() needs rcu_read_lock()"
65087+ " protection");
65088+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65089+}
65090+
65091 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65092 {
65093 struct pid *pid;
65094diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
65095index e7cb76d..75eceb3 100644
65096--- a/kernel/posix-cpu-timers.c
65097+++ b/kernel/posix-cpu-timers.c
65098@@ -6,6 +6,7 @@
65099 #include <linux/posix-timers.h>
65100 #include <linux/errno.h>
65101 #include <linux/math64.h>
65102+#include <linux/security.h>
65103 #include <asm/uaccess.h>
65104 #include <linux/kernel_stat.h>
65105 #include <trace/events/timer.h>
65106@@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
65107
65108 static __init int init_posix_cpu_timers(void)
65109 {
65110- struct k_clock process = {
65111+ static struct k_clock process = {
65112 .clock_getres = process_cpu_clock_getres,
65113 .clock_get = process_cpu_clock_get,
65114 .timer_create = process_cpu_timer_create,
65115 .nsleep = process_cpu_nsleep,
65116 .nsleep_restart = process_cpu_nsleep_restart,
65117 };
65118- struct k_clock thread = {
65119+ static struct k_clock thread = {
65120 .clock_getres = thread_cpu_clock_getres,
65121 .clock_get = thread_cpu_clock_get,
65122 .timer_create = thread_cpu_timer_create,
65123diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
65124index 69185ae..cc2847a 100644
65125--- a/kernel/posix-timers.c
65126+++ b/kernel/posix-timers.c
65127@@ -43,6 +43,7 @@
65128 #include <linux/idr.h>
65129 #include <linux/posix-clock.h>
65130 #include <linux/posix-timers.h>
65131+#include <linux/grsecurity.h>
65132 #include <linux/syscalls.h>
65133 #include <linux/wait.h>
65134 #include <linux/workqueue.h>
65135@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
65136 * which we beg off on and pass to do_sys_settimeofday().
65137 */
65138
65139-static struct k_clock posix_clocks[MAX_CLOCKS];
65140+static struct k_clock *posix_clocks[MAX_CLOCKS];
65141
65142 /*
65143 * These ones are defined below.
65144@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
65145 */
65146 static __init int init_posix_timers(void)
65147 {
65148- struct k_clock clock_realtime = {
65149+ static struct k_clock clock_realtime = {
65150 .clock_getres = hrtimer_get_res,
65151 .clock_get = posix_clock_realtime_get,
65152 .clock_set = posix_clock_realtime_set,
65153@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
65154 .timer_get = common_timer_get,
65155 .timer_del = common_timer_del,
65156 };
65157- struct k_clock clock_monotonic = {
65158+ static struct k_clock clock_monotonic = {
65159 .clock_getres = hrtimer_get_res,
65160 .clock_get = posix_ktime_get_ts,
65161 .nsleep = common_nsleep,
65162@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
65163 .timer_get = common_timer_get,
65164 .timer_del = common_timer_del,
65165 };
65166- struct k_clock clock_monotonic_raw = {
65167+ static struct k_clock clock_monotonic_raw = {
65168 .clock_getres = hrtimer_get_res,
65169 .clock_get = posix_get_monotonic_raw,
65170 };
65171- struct k_clock clock_realtime_coarse = {
65172+ static struct k_clock clock_realtime_coarse = {
65173 .clock_getres = posix_get_coarse_res,
65174 .clock_get = posix_get_realtime_coarse,
65175 };
65176- struct k_clock clock_monotonic_coarse = {
65177+ static struct k_clock clock_monotonic_coarse = {
65178 .clock_getres = posix_get_coarse_res,
65179 .clock_get = posix_get_monotonic_coarse,
65180 };
65181- struct k_clock clock_boottime = {
65182+ static struct k_clock clock_boottime = {
65183 .clock_getres = hrtimer_get_res,
65184 .clock_get = posix_get_boottime,
65185 .nsleep = common_nsleep,
65186@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
65187 return;
65188 }
65189
65190- posix_clocks[clock_id] = *new_clock;
65191+ posix_clocks[clock_id] = new_clock;
65192 }
65193 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
65194
65195@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
65196 return (id & CLOCKFD_MASK) == CLOCKFD ?
65197 &clock_posix_dynamic : &clock_posix_cpu;
65198
65199- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
65200+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
65201 return NULL;
65202- return &posix_clocks[id];
65203+ return posix_clocks[id];
65204 }
65205
65206 static int common_timer_create(struct k_itimer *new_timer)
65207@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
65208 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65209 return -EFAULT;
65210
65211+ /* only the CLOCK_REALTIME clock can be set, all other clocks
65212+ have their clock_set fptr set to a nosettime dummy function
65213+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65214+ call common_clock_set, which calls do_sys_settimeofday, which
65215+ we hook
65216+ */
65217+
65218 return kc->clock_set(which_clock, &new_tp);
65219 }
65220
65221diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
65222index d523593..68197a4 100644
65223--- a/kernel/power/poweroff.c
65224+++ b/kernel/power/poweroff.c
65225@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
65226 .enable_mask = SYSRQ_ENABLE_BOOT,
65227 };
65228
65229-static int pm_sysrq_init(void)
65230+static int __init pm_sysrq_init(void)
65231 {
65232 register_sysrq_key('o', &sysrq_poweroff_op);
65233 return 0;
65234diff --git a/kernel/power/process.c b/kernel/power/process.c
65235index addbbe5..f9e32e0 100644
65236--- a/kernel/power/process.c
65237+++ b/kernel/power/process.c
65238@@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
65239 u64 elapsed_csecs64;
65240 unsigned int elapsed_csecs;
65241 bool wakeup = false;
65242+ bool timedout = false;
65243
65244 do_gettimeofday(&start);
65245
65246@@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
65247
65248 while (true) {
65249 todo = 0;
65250+ if (time_after(jiffies, end_time))
65251+ timedout = true;
65252 read_lock(&tasklist_lock);
65253 do_each_thread(g, p) {
65254 if (frozen(p) || !freezable(p))
65255@@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
65256 * try_to_stop() after schedule() in ptrace/signal
65257 * stop sees TIF_FREEZE.
65258 */
65259- if (!task_is_stopped_or_traced(p) &&
65260- !freezer_should_skip(p))
65261+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65262 todo++;
65263+ if (timedout) {
65264+ printk(KERN_ERR "Task refusing to freeze:\n");
65265+ sched_show_task(p);
65266+ }
65267+ }
65268 } while_each_thread(g, p);
65269 read_unlock(&tasklist_lock);
65270
65271@@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
65272 todo += wq_busy;
65273 }
65274
65275- if (!todo || time_after(jiffies, end_time))
65276+ if (!todo || timedout)
65277 break;
65278
65279 if (pm_wakeup_pending()) {
65280diff --git a/kernel/printk.c b/kernel/printk.c
65281index 7982a0a..2095fdc 100644
65282--- a/kernel/printk.c
65283+++ b/kernel/printk.c
65284@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
65285 if (from_file && type != SYSLOG_ACTION_OPEN)
65286 return 0;
65287
65288+#ifdef CONFIG_GRKERNSEC_DMESG
65289+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
65290+ return -EPERM;
65291+#endif
65292+
65293 if (syslog_action_restricted(type)) {
65294 if (capable(CAP_SYSLOG))
65295 return 0;
65296diff --git a/kernel/profile.c b/kernel/profile.c
65297index 76b8e77..a2930e8 100644
65298--- a/kernel/profile.c
65299+++ b/kernel/profile.c
65300@@ -39,7 +39,7 @@ struct profile_hit {
65301 /* Oprofile timer tick hook */
65302 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65303
65304-static atomic_t *prof_buffer;
65305+static atomic_unchecked_t *prof_buffer;
65306 static unsigned long prof_len, prof_shift;
65307
65308 int prof_on __read_mostly;
65309@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
65310 hits[i].pc = 0;
65311 continue;
65312 }
65313- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65314+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65315 hits[i].hits = hits[i].pc = 0;
65316 }
65317 }
65318@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65319 * Add the current hit(s) and flush the write-queue out
65320 * to the global buffer:
65321 */
65322- atomic_add(nr_hits, &prof_buffer[pc]);
65323+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65324 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65325- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65326+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65327 hits[i].pc = hits[i].hits = 0;
65328 }
65329 out:
65330@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
65331 {
65332 unsigned long pc;
65333 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65334- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65335+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65336 }
65337 #endif /* !CONFIG_SMP */
65338
65339@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
65340 return -EFAULT;
65341 buf++; p++; count--; read++;
65342 }
65343- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65344+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65345 if (copy_to_user(buf, (void *)pnt, count))
65346 return -EFAULT;
65347 read += count;
65348@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
65349 }
65350 #endif
65351 profile_discard_flip_buffers();
65352- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65353+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65354 return count;
65355 }
65356
65357diff --git a/kernel/ptrace.c b/kernel/ptrace.c
65358index 78ab24a..332c915 100644
65359--- a/kernel/ptrace.c
65360+++ b/kernel/ptrace.c
65361@@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
65362 return ret;
65363 }
65364
65365-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65366+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65367+ unsigned int log)
65368 {
65369 const struct cred *cred = current_cred(), *tcred;
65370
65371@@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65372 cred->gid == tcred->sgid &&
65373 cred->gid == tcred->gid))
65374 goto ok;
65375- if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
65376+ if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
65377+ (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
65378 goto ok;
65379 rcu_read_unlock();
65380 return -EPERM;
65381@@ -207,7 +209,9 @@ ok:
65382 smp_rmb();
65383 if (task->mm)
65384 dumpable = get_dumpable(task->mm);
65385- if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
65386+ if (!dumpable &&
65387+ ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
65388+ (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
65389 return -EPERM;
65390
65391 return security_ptrace_access_check(task, mode);
65392@@ -217,7 +221,21 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
65393 {
65394 int err;
65395 task_lock(task);
65396- err = __ptrace_may_access(task, mode);
65397+ err = __ptrace_may_access(task, mode, 0);
65398+ task_unlock(task);
65399+ return !err;
65400+}
65401+
65402+bool ptrace_may_access_nolock(struct task_struct *task, unsigned int mode)
65403+{
65404+ return __ptrace_may_access(task, mode, 0);
65405+}
65406+
65407+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65408+{
65409+ int err;
65410+ task_lock(task);
65411+ err = __ptrace_may_access(task, mode, 1);
65412 task_unlock(task);
65413 return !err;
65414 }
65415@@ -262,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65416 goto out;
65417
65418 task_lock(task);
65419- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65420+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65421 task_unlock(task);
65422 if (retval)
65423 goto unlock_creds;
65424@@ -277,7 +295,7 @@ static int ptrace_attach(struct task_struct *task, long request,
65425 task->ptrace = PT_PTRACED;
65426 if (seize)
65427 task->ptrace |= PT_SEIZED;
65428- if (task_ns_capable(task, CAP_SYS_PTRACE))
65429+ if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
65430 task->ptrace |= PT_PTRACE_CAP;
65431
65432 __ptrace_link(task, current);
65433@@ -483,7 +501,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
65434 break;
65435 return -EIO;
65436 }
65437- if (copy_to_user(dst, buf, retval))
65438+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
65439 return -EFAULT;
65440 copied += retval;
65441 src += retval;
65442@@ -680,7 +698,7 @@ int ptrace_request(struct task_struct *child, long request,
65443 bool seized = child->ptrace & PT_SEIZED;
65444 int ret = -EIO;
65445 siginfo_t siginfo, *si;
65446- void __user *datavp = (void __user *) data;
65447+ void __user *datavp = (__force void __user *) data;
65448 unsigned long __user *datalp = datavp;
65449 unsigned long flags;
65450
65451@@ -882,14 +900,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
65452 goto out;
65453 }
65454
65455+ if (gr_handle_ptrace(child, request)) {
65456+ ret = -EPERM;
65457+ goto out_put_task_struct;
65458+ }
65459+
65460 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65461 ret = ptrace_attach(child, request, data);
65462 /*
65463 * Some architectures need to do book-keeping after
65464 * a ptrace attach.
65465 */
65466- if (!ret)
65467+ if (!ret) {
65468 arch_ptrace_attach(child);
65469+ gr_audit_ptrace(child);
65470+ }
65471 goto out_put_task_struct;
65472 }
65473
65474@@ -915,7 +940,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
65475 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65476 if (copied != sizeof(tmp))
65477 return -EIO;
65478- return put_user(tmp, (unsigned long __user *)data);
65479+ return put_user(tmp, (__force unsigned long __user *)data);
65480 }
65481
65482 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
65483@@ -1025,14 +1050,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
65484 goto out;
65485 }
65486
65487+ if (gr_handle_ptrace(child, request)) {
65488+ ret = -EPERM;
65489+ goto out_put_task_struct;
65490+ }
65491+
65492 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65493 ret = ptrace_attach(child, request, data);
65494 /*
65495 * Some architectures need to do book-keeping after
65496 * a ptrace attach.
65497 */
65498- if (!ret)
65499+ if (!ret) {
65500 arch_ptrace_attach(child);
65501+ gr_audit_ptrace(child);
65502+ }
65503 goto out_put_task_struct;
65504 }
65505
65506diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
65507index 764825c..3aa6ac4 100644
65508--- a/kernel/rcutorture.c
65509+++ b/kernel/rcutorture.c
65510@@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
65511 { 0 };
65512 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65513 { 0 };
65514-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65515-static atomic_t n_rcu_torture_alloc;
65516-static atomic_t n_rcu_torture_alloc_fail;
65517-static atomic_t n_rcu_torture_free;
65518-static atomic_t n_rcu_torture_mberror;
65519-static atomic_t n_rcu_torture_error;
65520+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65521+static atomic_unchecked_t n_rcu_torture_alloc;
65522+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65523+static atomic_unchecked_t n_rcu_torture_free;
65524+static atomic_unchecked_t n_rcu_torture_mberror;
65525+static atomic_unchecked_t n_rcu_torture_error;
65526 static long n_rcu_torture_boost_ktrerror;
65527 static long n_rcu_torture_boost_rterror;
65528 static long n_rcu_torture_boost_failure;
65529@@ -223,11 +223,11 @@ rcu_torture_alloc(void)
65530
65531 spin_lock_bh(&rcu_torture_lock);
65532 if (list_empty(&rcu_torture_freelist)) {
65533- atomic_inc(&n_rcu_torture_alloc_fail);
65534+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65535 spin_unlock_bh(&rcu_torture_lock);
65536 return NULL;
65537 }
65538- atomic_inc(&n_rcu_torture_alloc);
65539+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65540 p = rcu_torture_freelist.next;
65541 list_del_init(p);
65542 spin_unlock_bh(&rcu_torture_lock);
65543@@ -240,7 +240,7 @@ rcu_torture_alloc(void)
65544 static void
65545 rcu_torture_free(struct rcu_torture *p)
65546 {
65547- atomic_inc(&n_rcu_torture_free);
65548+ atomic_inc_unchecked(&n_rcu_torture_free);
65549 spin_lock_bh(&rcu_torture_lock);
65550 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65551 spin_unlock_bh(&rcu_torture_lock);
65552@@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
65553 i = rp->rtort_pipe_count;
65554 if (i > RCU_TORTURE_PIPE_LEN)
65555 i = RCU_TORTURE_PIPE_LEN;
65556- atomic_inc(&rcu_torture_wcount[i]);
65557+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65558 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65559 rp->rtort_mbtest = 0;
65560 rcu_torture_free(rp);
65561@@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
65562 i = rp->rtort_pipe_count;
65563 if (i > RCU_TORTURE_PIPE_LEN)
65564 i = RCU_TORTURE_PIPE_LEN;
65565- atomic_inc(&rcu_torture_wcount[i]);
65566+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65567 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65568 rp->rtort_mbtest = 0;
65569 list_del(&rp->rtort_free);
65570@@ -872,7 +872,7 @@ rcu_torture_writer(void *arg)
65571 i = old_rp->rtort_pipe_count;
65572 if (i > RCU_TORTURE_PIPE_LEN)
65573 i = RCU_TORTURE_PIPE_LEN;
65574- atomic_inc(&rcu_torture_wcount[i]);
65575+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65576 old_rp->rtort_pipe_count++;
65577 cur_ops->deferred_free(old_rp);
65578 }
65579@@ -940,7 +940,7 @@ static void rcu_torture_timer(unsigned long unused)
65580 return;
65581 }
65582 if (p->rtort_mbtest == 0)
65583- atomic_inc(&n_rcu_torture_mberror);
65584+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65585 spin_lock(&rand_lock);
65586 cur_ops->read_delay(&rand);
65587 n_rcu_torture_timers++;
65588@@ -1001,7 +1001,7 @@ rcu_torture_reader(void *arg)
65589 continue;
65590 }
65591 if (p->rtort_mbtest == 0)
65592- atomic_inc(&n_rcu_torture_mberror);
65593+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65594 cur_ops->read_delay(&rand);
65595 preempt_disable();
65596 pipe_count = p->rtort_pipe_count;
65597@@ -1060,16 +1060,16 @@ rcu_torture_printk(char *page)
65598 rcu_torture_current,
65599 rcu_torture_current_version,
65600 list_empty(&rcu_torture_freelist),
65601- atomic_read(&n_rcu_torture_alloc),
65602- atomic_read(&n_rcu_torture_alloc_fail),
65603- atomic_read(&n_rcu_torture_free),
65604- atomic_read(&n_rcu_torture_mberror),
65605+ atomic_read_unchecked(&n_rcu_torture_alloc),
65606+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65607+ atomic_read_unchecked(&n_rcu_torture_free),
65608+ atomic_read_unchecked(&n_rcu_torture_mberror),
65609 n_rcu_torture_boost_ktrerror,
65610 n_rcu_torture_boost_rterror,
65611 n_rcu_torture_boost_failure,
65612 n_rcu_torture_boosts,
65613 n_rcu_torture_timers);
65614- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
65615+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
65616 n_rcu_torture_boost_ktrerror != 0 ||
65617 n_rcu_torture_boost_rterror != 0 ||
65618 n_rcu_torture_boost_failure != 0)
65619@@ -1077,7 +1077,7 @@ rcu_torture_printk(char *page)
65620 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65621 if (i > 1) {
65622 cnt += sprintf(&page[cnt], "!!! ");
65623- atomic_inc(&n_rcu_torture_error);
65624+ atomic_inc_unchecked(&n_rcu_torture_error);
65625 WARN_ON_ONCE(1);
65626 }
65627 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65628@@ -1091,7 +1091,7 @@ rcu_torture_printk(char *page)
65629 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65630 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65631 cnt += sprintf(&page[cnt], " %d",
65632- atomic_read(&rcu_torture_wcount[i]));
65633+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65634 }
65635 cnt += sprintf(&page[cnt], "\n");
65636 if (cur_ops->stats)
65637@@ -1401,7 +1401,7 @@ rcu_torture_cleanup(void)
65638
65639 if (cur_ops->cleanup)
65640 cur_ops->cleanup();
65641- if (atomic_read(&n_rcu_torture_error))
65642+ if (atomic_read_unchecked(&n_rcu_torture_error))
65643 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
65644 else
65645 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
65646@@ -1465,17 +1465,17 @@ rcu_torture_init(void)
65647
65648 rcu_torture_current = NULL;
65649 rcu_torture_current_version = 0;
65650- atomic_set(&n_rcu_torture_alloc, 0);
65651- atomic_set(&n_rcu_torture_alloc_fail, 0);
65652- atomic_set(&n_rcu_torture_free, 0);
65653- atomic_set(&n_rcu_torture_mberror, 0);
65654- atomic_set(&n_rcu_torture_error, 0);
65655+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65656+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65657+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65658+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65659+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65660 n_rcu_torture_boost_ktrerror = 0;
65661 n_rcu_torture_boost_rterror = 0;
65662 n_rcu_torture_boost_failure = 0;
65663 n_rcu_torture_boosts = 0;
65664 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65665- atomic_set(&rcu_torture_wcount[i], 0);
65666+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65667 for_each_possible_cpu(cpu) {
65668 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65669 per_cpu(rcu_torture_count, cpu)[i] = 0;
65670diff --git a/kernel/rcutree.c b/kernel/rcutree.c
65671index 6b76d81..7afc1b3 100644
65672--- a/kernel/rcutree.c
65673+++ b/kernel/rcutree.c
65674@@ -367,9 +367,9 @@ void rcu_enter_nohz(void)
65675 trace_rcu_dyntick("Start");
65676 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65677 smp_mb__before_atomic_inc(); /* See above. */
65678- atomic_inc(&rdtp->dynticks);
65679+ atomic_inc_unchecked(&rdtp->dynticks);
65680 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
65681- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65682+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65683 local_irq_restore(flags);
65684 }
65685
65686@@ -391,10 +391,10 @@ void rcu_exit_nohz(void)
65687 return;
65688 }
65689 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
65690- atomic_inc(&rdtp->dynticks);
65691+ atomic_inc_unchecked(&rdtp->dynticks);
65692 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65693 smp_mb__after_atomic_inc(); /* See above. */
65694- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65695+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65696 trace_rcu_dyntick("End");
65697 local_irq_restore(flags);
65698 }
65699@@ -411,14 +411,14 @@ void rcu_nmi_enter(void)
65700 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
65701
65702 if (rdtp->dynticks_nmi_nesting == 0 &&
65703- (atomic_read(&rdtp->dynticks) & 0x1))
65704+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
65705 return;
65706 rdtp->dynticks_nmi_nesting++;
65707 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
65708- atomic_inc(&rdtp->dynticks);
65709+ atomic_inc_unchecked(&rdtp->dynticks);
65710 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
65711 smp_mb__after_atomic_inc(); /* See above. */
65712- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
65713+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
65714 }
65715
65716 /**
65717@@ -437,9 +437,9 @@ void rcu_nmi_exit(void)
65718 return;
65719 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
65720 smp_mb__before_atomic_inc(); /* See above. */
65721- atomic_inc(&rdtp->dynticks);
65722+ atomic_inc_unchecked(&rdtp->dynticks);
65723 smp_mb__after_atomic_inc(); /* Force delay to next write. */
65724- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
65725+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
65726 }
65727
65728 /**
65729@@ -474,7 +474,7 @@ void rcu_irq_exit(void)
65730 */
65731 static int dyntick_save_progress_counter(struct rcu_data *rdp)
65732 {
65733- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
65734+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65735 return 0;
65736 }
65737
65738@@ -489,7 +489,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
65739 unsigned int curr;
65740 unsigned int snap;
65741
65742- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
65743+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
65744 snap = (unsigned int)rdp->dynticks_snap;
65745
65746 /*
65747@@ -1552,7 +1552,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
65748 /*
65749 * Do RCU core processing for the current CPU.
65750 */
65751-static void rcu_process_callbacks(struct softirq_action *unused)
65752+static void rcu_process_callbacks(void)
65753 {
65754 trace_rcu_utilization("Start RCU core");
65755 __rcu_process_callbacks(&rcu_sched_state,
65756diff --git a/kernel/rcutree.h b/kernel/rcutree.h
65757index 849ce9e..74bc9de 100644
65758--- a/kernel/rcutree.h
65759+++ b/kernel/rcutree.h
65760@@ -86,7 +86,7 @@
65761 struct rcu_dynticks {
65762 int dynticks_nesting; /* Track irq/process nesting level. */
65763 int dynticks_nmi_nesting; /* Track NMI nesting level. */
65764- atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
65765+ atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
65766 };
65767
65768 /* RCU's kthread states for tracing. */
65769diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
65770index 4b9b9f8..2326053 100644
65771--- a/kernel/rcutree_plugin.h
65772+++ b/kernel/rcutree_plugin.h
65773@@ -842,7 +842,7 @@ void synchronize_rcu_expedited(void)
65774
65775 /* Clean up and exit. */
65776 smp_mb(); /* ensure expedited GP seen before counter increment. */
65777- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
65778+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
65779 unlock_mb_ret:
65780 mutex_unlock(&sync_rcu_preempt_exp_mutex);
65781 mb_ret:
65782@@ -1815,8 +1815,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
65783
65784 #else /* #ifndef CONFIG_SMP */
65785
65786-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
65787-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
65788+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
65789+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
65790
65791 static int synchronize_sched_expedited_cpu_stop(void *data)
65792 {
65793@@ -1871,7 +1871,7 @@ void synchronize_sched_expedited(void)
65794 int firstsnap, s, snap, trycount = 0;
65795
65796 /* Note that atomic_inc_return() implies full memory barrier. */
65797- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
65798+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
65799 get_online_cpus();
65800
65801 /*
65802@@ -1892,7 +1892,7 @@ void synchronize_sched_expedited(void)
65803 }
65804
65805 /* Check to see if someone else did our work for us. */
65806- s = atomic_read(&sync_sched_expedited_done);
65807+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65808 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
65809 smp_mb(); /* ensure test happens before caller kfree */
65810 return;
65811@@ -1907,7 +1907,7 @@ void synchronize_sched_expedited(void)
65812 * grace period works for us.
65813 */
65814 get_online_cpus();
65815- snap = atomic_read(&sync_sched_expedited_started) - 1;
65816+ snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
65817 smp_mb(); /* ensure read is before try_stop_cpus(). */
65818 }
65819
65820@@ -1918,12 +1918,12 @@ void synchronize_sched_expedited(void)
65821 * than we did beat us to the punch.
65822 */
65823 do {
65824- s = atomic_read(&sync_sched_expedited_done);
65825+ s = atomic_read_unchecked(&sync_sched_expedited_done);
65826 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
65827 smp_mb(); /* ensure test happens before caller kfree */
65828 break;
65829 }
65830- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
65831+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
65832
65833 put_online_cpus();
65834 }
65835@@ -1985,7 +1985,7 @@ int rcu_needs_cpu(int cpu)
65836 for_each_online_cpu(thatcpu) {
65837 if (thatcpu == cpu)
65838 continue;
65839- snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
65840+ snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
65841 thatcpu).dynticks);
65842 smp_mb(); /* Order sampling of snap with end of grace period. */
65843 if ((snap & 0x1) != 0) {
65844diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
65845index 9feffa4..54058df 100644
65846--- a/kernel/rcutree_trace.c
65847+++ b/kernel/rcutree_trace.c
65848@@ -69,7 +69,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
65849 rdp->qs_pending);
65850 #ifdef CONFIG_NO_HZ
65851 seq_printf(m, " dt=%d/%d/%d df=%lu",
65852- atomic_read(&rdp->dynticks->dynticks),
65853+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65854 rdp->dynticks->dynticks_nesting,
65855 rdp->dynticks->dynticks_nmi_nesting,
65856 rdp->dynticks_fqs);
65857@@ -143,7 +143,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
65858 rdp->qs_pending);
65859 #ifdef CONFIG_NO_HZ
65860 seq_printf(m, ",%d,%d,%d,%lu",
65861- atomic_read(&rdp->dynticks->dynticks),
65862+ atomic_read_unchecked(&rdp->dynticks->dynticks),
65863 rdp->dynticks->dynticks_nesting,
65864 rdp->dynticks->dynticks_nmi_nesting,
65865 rdp->dynticks_fqs);
65866diff --git a/kernel/resource.c b/kernel/resource.c
65867index 7640b3a..5879283 100644
65868--- a/kernel/resource.c
65869+++ b/kernel/resource.c
65870@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
65871
65872 static int __init ioresources_init(void)
65873 {
65874+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65875+#ifdef CONFIG_GRKERNSEC_PROC_USER
65876+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65877+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65878+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65879+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65880+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65881+#endif
65882+#else
65883 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65884 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65885+#endif
65886 return 0;
65887 }
65888 __initcall(ioresources_init);
65889diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
65890index 3d9f31c..7fefc9e 100644
65891--- a/kernel/rtmutex-tester.c
65892+++ b/kernel/rtmutex-tester.c
65893@@ -20,7 +20,7 @@
65894 #define MAX_RT_TEST_MUTEXES 8
65895
65896 static spinlock_t rttest_lock;
65897-static atomic_t rttest_event;
65898+static atomic_unchecked_t rttest_event;
65899
65900 struct test_thread_data {
65901 int opcode;
65902@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65903
65904 case RTTEST_LOCKCONT:
65905 td->mutexes[td->opdata] = 1;
65906- td->event = atomic_add_return(1, &rttest_event);
65907+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65908 return 0;
65909
65910 case RTTEST_RESET:
65911@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65912 return 0;
65913
65914 case RTTEST_RESETEVENT:
65915- atomic_set(&rttest_event, 0);
65916+ atomic_set_unchecked(&rttest_event, 0);
65917 return 0;
65918
65919 default:
65920@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65921 return ret;
65922
65923 td->mutexes[id] = 1;
65924- td->event = atomic_add_return(1, &rttest_event);
65925+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65926 rt_mutex_lock(&mutexes[id]);
65927- td->event = atomic_add_return(1, &rttest_event);
65928+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65929 td->mutexes[id] = 4;
65930 return 0;
65931
65932@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65933 return ret;
65934
65935 td->mutexes[id] = 1;
65936- td->event = atomic_add_return(1, &rttest_event);
65937+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65938 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65939- td->event = atomic_add_return(1, &rttest_event);
65940+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65941 td->mutexes[id] = ret ? 0 : 4;
65942 return ret ? -EINTR : 0;
65943
65944@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
65945 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65946 return ret;
65947
65948- td->event = atomic_add_return(1, &rttest_event);
65949+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65950 rt_mutex_unlock(&mutexes[id]);
65951- td->event = atomic_add_return(1, &rttest_event);
65952+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65953 td->mutexes[id] = 0;
65954 return 0;
65955
65956@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65957 break;
65958
65959 td->mutexes[dat] = 2;
65960- td->event = atomic_add_return(1, &rttest_event);
65961+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65962 break;
65963
65964 default:
65965@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65966 return;
65967
65968 td->mutexes[dat] = 3;
65969- td->event = atomic_add_return(1, &rttest_event);
65970+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65971 break;
65972
65973 case RTTEST_LOCKNOWAIT:
65974@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
65975 return;
65976
65977 td->mutexes[dat] = 1;
65978- td->event = atomic_add_return(1, &rttest_event);
65979+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65980 return;
65981
65982 default:
65983diff --git a/kernel/sched.c b/kernel/sched.c
65984index d6b149c..896cbb8 100644
65985--- a/kernel/sched.c
65986+++ b/kernel/sched.c
65987@@ -4389,6 +4389,19 @@ pick_next_task(struct rq *rq)
65988 BUG(); /* the idle class will always have a runnable task */
65989 }
65990
65991+#ifdef CONFIG_GRKERNSEC_SETXID
65992+extern void gr_delayed_cred_worker(void);
65993+static inline void gr_cred_schedule(void)
65994+{
65995+ if (unlikely(current->delayed_cred))
65996+ gr_delayed_cred_worker();
65997+}
65998+#else
65999+static inline void gr_cred_schedule(void)
66000+{
66001+}
66002+#endif
66003+
66004 /*
66005 * __schedule() is the main scheduler function.
66006 */
66007@@ -4408,6 +4421,8 @@ need_resched:
66008
66009 schedule_debug(prev);
66010
66011+ gr_cred_schedule();
66012+
66013 if (sched_feat(HRTICK))
66014 hrtick_clear(rq);
66015
66016@@ -5098,6 +5113,8 @@ int can_nice(const struct task_struct *p, const int nice)
66017 /* convert nice value [19,-20] to rlimit style value [1,40] */
66018 int nice_rlim = 20 - nice;
66019
66020+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
66021+
66022 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
66023 capable(CAP_SYS_NICE));
66024 }
66025@@ -5131,7 +5148,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66026 if (nice > 19)
66027 nice = 19;
66028
66029- if (increment < 0 && !can_nice(current, nice))
66030+ if (increment < 0 && (!can_nice(current, nice) ||
66031+ gr_handle_chroot_nice()))
66032 return -EPERM;
66033
66034 retval = security_task_setnice(current, nice);
66035@@ -5288,6 +5306,7 @@ recheck:
66036 unsigned long rlim_rtprio =
66037 task_rlimit(p, RLIMIT_RTPRIO);
66038
66039+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
66040 /* can't set/change the rt policy */
66041 if (policy != p->policy && !rlim_rtprio)
66042 return -EPERM;
66043diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
66044index 429242f..d7cca82 100644
66045--- a/kernel/sched_autogroup.c
66046+++ b/kernel/sched_autogroup.c
66047@@ -7,7 +7,7 @@
66048
66049 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
66050 static struct autogroup autogroup_default;
66051-static atomic_t autogroup_seq_nr;
66052+static atomic_unchecked_t autogroup_seq_nr;
66053
66054 static void __init autogroup_init(struct task_struct *init_task)
66055 {
66056@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
66057
66058 kref_init(&ag->kref);
66059 init_rwsem(&ag->lock);
66060- ag->id = atomic_inc_return(&autogroup_seq_nr);
66061+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
66062 ag->tg = tg;
66063 #ifdef CONFIG_RT_GROUP_SCHED
66064 /*
66065diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
66066index 8a39fa3..34f3dbc 100644
66067--- a/kernel/sched_fair.c
66068+++ b/kernel/sched_fair.c
66069@@ -4801,7 +4801,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
66070 * run_rebalance_domains is triggered when needed from the scheduler tick.
66071 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
66072 */
66073-static void run_rebalance_domains(struct softirq_action *h)
66074+static void run_rebalance_domains(void)
66075 {
66076 int this_cpu = smp_processor_id();
66077 struct rq *this_rq = cpu_rq(this_cpu);
66078diff --git a/kernel/signal.c b/kernel/signal.c
66079index 2065515..aed2987 100644
66080--- a/kernel/signal.c
66081+++ b/kernel/signal.c
66082@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
66083
66084 int print_fatal_signals __read_mostly;
66085
66086-static void __user *sig_handler(struct task_struct *t, int sig)
66087+static __sighandler_t sig_handler(struct task_struct *t, int sig)
66088 {
66089 return t->sighand->action[sig - 1].sa.sa_handler;
66090 }
66091
66092-static int sig_handler_ignored(void __user *handler, int sig)
66093+static int sig_handler_ignored(__sighandler_t handler, int sig)
66094 {
66095 /* Is it explicitly or implicitly ignored? */
66096 return handler == SIG_IGN ||
66097@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
66098 static int sig_task_ignored(struct task_struct *t, int sig,
66099 int from_ancestor_ns)
66100 {
66101- void __user *handler;
66102+ __sighandler_t handler;
66103
66104 handler = sig_handler(t, sig);
66105
66106@@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
66107 atomic_inc(&user->sigpending);
66108 rcu_read_unlock();
66109
66110+ if (!override_rlimit)
66111+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66112+
66113 if (override_rlimit ||
66114 atomic_read(&user->sigpending) <=
66115 task_rlimit(t, RLIMIT_SIGPENDING)) {
66116@@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
66117
66118 int unhandled_signal(struct task_struct *tsk, int sig)
66119 {
66120- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66121+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66122 if (is_global_init(tsk))
66123 return 1;
66124 if (handler != SIG_IGN && handler != SIG_DFL)
66125@@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
66126 }
66127 }
66128
66129+ /* allow glibc communication via tgkill to other threads in our
66130+ thread group */
66131+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
66132+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
66133+ && gr_handle_signal(t, sig))
66134+ return -EPERM;
66135+
66136 return security_task_kill(t, info, sig, 0);
66137 }
66138
66139@@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66140 return send_signal(sig, info, p, 1);
66141 }
66142
66143-static int
66144+int
66145 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66146 {
66147 return send_signal(sig, info, t, 0);
66148@@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66149 unsigned long int flags;
66150 int ret, blocked, ignored;
66151 struct k_sigaction *action;
66152+ int is_unhandled = 0;
66153
66154 spin_lock_irqsave(&t->sighand->siglock, flags);
66155 action = &t->sighand->action[sig-1];
66156@@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66157 }
66158 if (action->sa.sa_handler == SIG_DFL)
66159 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66160+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66161+ is_unhandled = 1;
66162 ret = specific_send_sig_info(sig, info, t);
66163 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66164
66165+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
66166+ normal operation */
66167+ if (is_unhandled) {
66168+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66169+ gr_handle_crash(t, sig);
66170+ }
66171+
66172 return ret;
66173 }
66174
66175@@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
66176 ret = check_kill_permission(sig, info, p);
66177 rcu_read_unlock();
66178
66179- if (!ret && sig)
66180+ if (!ret && sig) {
66181 ret = do_send_sig_info(sig, info, p, true);
66182+ if (!ret)
66183+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66184+ }
66185
66186 return ret;
66187 }
66188@@ -2754,7 +2777,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
66189 int error = -ESRCH;
66190
66191 rcu_read_lock();
66192- p = find_task_by_vpid(pid);
66193+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66194+ /* allow glibc communication via tgkill to other threads in our
66195+ thread group */
66196+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66197+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
66198+ p = find_task_by_vpid_unrestricted(pid);
66199+ else
66200+#endif
66201+ p = find_task_by_vpid(pid);
66202 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66203 error = check_kill_permission(sig, info, p);
66204 /*
66205diff --git a/kernel/smp.c b/kernel/smp.c
66206index db197d6..17aef0b 100644
66207--- a/kernel/smp.c
66208+++ b/kernel/smp.c
66209@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
66210 }
66211 EXPORT_SYMBOL(smp_call_function);
66212
66213-void ipi_call_lock(void)
66214+void ipi_call_lock(void) __acquires(call_function.lock)
66215 {
66216 raw_spin_lock(&call_function.lock);
66217 }
66218
66219-void ipi_call_unlock(void)
66220+void ipi_call_unlock(void) __releases(call_function.lock)
66221 {
66222 raw_spin_unlock(&call_function.lock);
66223 }
66224
66225-void ipi_call_lock_irq(void)
66226+void ipi_call_lock_irq(void) __acquires(call_function.lock)
66227 {
66228 raw_spin_lock_irq(&call_function.lock);
66229 }
66230
66231-void ipi_call_unlock_irq(void)
66232+void ipi_call_unlock_irq(void) __releases(call_function.lock)
66233 {
66234 raw_spin_unlock_irq(&call_function.lock);
66235 }
66236diff --git a/kernel/softirq.c b/kernel/softirq.c
66237index 2c71d91..1021f81 100644
66238--- a/kernel/softirq.c
66239+++ b/kernel/softirq.c
66240@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
66241
66242 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66243
66244-char *softirq_to_name[NR_SOFTIRQS] = {
66245+const char * const softirq_to_name[NR_SOFTIRQS] = {
66246 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66247 "TASKLET", "SCHED", "HRTIMER", "RCU"
66248 };
66249@@ -235,7 +235,7 @@ restart:
66250 kstat_incr_softirqs_this_cpu(vec_nr);
66251
66252 trace_softirq_entry(vec_nr);
66253- h->action(h);
66254+ h->action();
66255 trace_softirq_exit(vec_nr);
66256 if (unlikely(prev_count != preempt_count())) {
66257 printk(KERN_ERR "huh, entered softirq %u %s %p"
66258@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
66259 local_irq_restore(flags);
66260 }
66261
66262-void open_softirq(int nr, void (*action)(struct softirq_action *))
66263+void open_softirq(int nr, void (*action)(void))
66264 {
66265- softirq_vec[nr].action = action;
66266+ pax_open_kernel();
66267+ *(void **)&softirq_vec[nr].action = action;
66268+ pax_close_kernel();
66269 }
66270
66271 /*
66272@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
66273
66274 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66275
66276-static void tasklet_action(struct softirq_action *a)
66277+static void tasklet_action(void)
66278 {
66279 struct tasklet_struct *list;
66280
66281@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
66282 }
66283 }
66284
66285-static void tasklet_hi_action(struct softirq_action *a)
66286+static void tasklet_hi_action(void)
66287 {
66288 struct tasklet_struct *list;
66289
66290diff --git a/kernel/sys.c b/kernel/sys.c
66291index 481611f..0754d86 100644
66292--- a/kernel/sys.c
66293+++ b/kernel/sys.c
66294@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
66295 error = -EACCES;
66296 goto out;
66297 }
66298+
66299+ if (gr_handle_chroot_setpriority(p, niceval)) {
66300+ error = -EACCES;
66301+ goto out;
66302+ }
66303+
66304 no_nice = security_task_setnice(p, niceval);
66305 if (no_nice) {
66306 error = no_nice;
66307@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
66308 goto error;
66309 }
66310
66311+ if (gr_check_group_change(new->gid, new->egid, -1))
66312+ goto error;
66313+
66314 if (rgid != (gid_t) -1 ||
66315 (egid != (gid_t) -1 && egid != old->gid))
66316 new->sgid = new->egid;
66317@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66318 old = current_cred();
66319
66320 retval = -EPERM;
66321+
66322+ if (gr_check_group_change(gid, gid, gid))
66323+ goto error;
66324+
66325 if (nsown_capable(CAP_SETGID))
66326 new->gid = new->egid = new->sgid = new->fsgid = gid;
66327 else if (gid == old->gid || gid == old->sgid)
66328@@ -618,7 +631,7 @@ error:
66329 /*
66330 * change the user struct in a credentials set to match the new UID
66331 */
66332-static int set_user(struct cred *new)
66333+int set_user(struct cred *new)
66334 {
66335 struct user_struct *new_user;
66336
66337@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
66338 goto error;
66339 }
66340
66341+ if (gr_check_user_change(new->uid, new->euid, -1))
66342+ goto error;
66343+
66344 if (new->uid != old->uid) {
66345 retval = set_user(new);
66346 if (retval < 0)
66347@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66348 old = current_cred();
66349
66350 retval = -EPERM;
66351+
66352+ if (gr_check_crash_uid(uid))
66353+ goto error;
66354+ if (gr_check_user_change(uid, uid, uid))
66355+ goto error;
66356+
66357 if (nsown_capable(CAP_SETUID)) {
66358 new->suid = new->uid = uid;
66359 if (uid != old->uid) {
66360@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
66361 goto error;
66362 }
66363
66364+ if (gr_check_user_change(ruid, euid, -1))
66365+ goto error;
66366+
66367 if (ruid != (uid_t) -1) {
66368 new->uid = ruid;
66369 if (ruid != old->uid) {
66370@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
66371 goto error;
66372 }
66373
66374+ if (gr_check_group_change(rgid, egid, -1))
66375+ goto error;
66376+
66377 if (rgid != (gid_t) -1)
66378 new->gid = rgid;
66379 if (egid != (gid_t) -1)
66380@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66381 old = current_cred();
66382 old_fsuid = old->fsuid;
66383
66384+ if (gr_check_user_change(-1, -1, uid))
66385+ goto error;
66386+
66387 if (uid == old->uid || uid == old->euid ||
66388 uid == old->suid || uid == old->fsuid ||
66389 nsown_capable(CAP_SETUID)) {
66390@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66391 }
66392 }
66393
66394+error:
66395 abort_creds(new);
66396 return old_fsuid;
66397
66398@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66399 if (gid == old->gid || gid == old->egid ||
66400 gid == old->sgid || gid == old->fsgid ||
66401 nsown_capable(CAP_SETGID)) {
66402+ if (gr_check_group_change(-1, -1, gid))
66403+ goto error;
66404+
66405 if (gid != old_fsgid) {
66406 new->fsgid = gid;
66407 goto change_okay;
66408 }
66409 }
66410
66411+error:
66412 abort_creds(new);
66413 return old_fsgid;
66414
66415@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
66416 }
66417 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
66418 snprintf(buf, len, "2.6.%u%s", v, rest);
66419- ret = copy_to_user(release, buf, len);
66420+ if (len > sizeof(buf))
66421+ ret = -EFAULT;
66422+ else
66423+ ret = copy_to_user(release, buf, len);
66424 }
66425 return ret;
66426 }
66427@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
66428 return -EFAULT;
66429
66430 down_read(&uts_sem);
66431- error = __copy_to_user(&name->sysname, &utsname()->sysname,
66432+ error = __copy_to_user(name->sysname, &utsname()->sysname,
66433 __OLD_UTS_LEN);
66434 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
66435- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
66436+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
66437 __OLD_UTS_LEN);
66438 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
66439- error |= __copy_to_user(&name->release, &utsname()->release,
66440+ error |= __copy_to_user(name->release, &utsname()->release,
66441 __OLD_UTS_LEN);
66442 error |= __put_user(0, name->release + __OLD_UTS_LEN);
66443- error |= __copy_to_user(&name->version, &utsname()->version,
66444+ error |= __copy_to_user(name->version, &utsname()->version,
66445 __OLD_UTS_LEN);
66446 error |= __put_user(0, name->version + __OLD_UTS_LEN);
66447- error |= __copy_to_user(&name->machine, &utsname()->machine,
66448+ error |= __copy_to_user(name->machine, &utsname()->machine,
66449 __OLD_UTS_LEN);
66450 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
66451 up_read(&uts_sem);
66452@@ -1720,7 +1759,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
66453 error = get_dumpable(me->mm);
66454 break;
66455 case PR_SET_DUMPABLE:
66456- if (arg2 < 0 || arg2 > 1) {
66457+ if (arg2 > 1) {
66458 error = -EINVAL;
66459 break;
66460 }
66461diff --git a/kernel/sysctl.c b/kernel/sysctl.c
66462index ae27196..7506d69 100644
66463--- a/kernel/sysctl.c
66464+++ b/kernel/sysctl.c
66465@@ -86,6 +86,13 @@
66466
66467
66468 #if defined(CONFIG_SYSCTL)
66469+#include <linux/grsecurity.h>
66470+#include <linux/grinternal.h>
66471+
66472+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66473+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66474+ const int op);
66475+extern int gr_handle_chroot_sysctl(const int op);
66476
66477 /* External variables not in a header file. */
66478 extern int sysctl_overcommit_memory;
66479@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
66480 }
66481
66482 #endif
66483+extern struct ctl_table grsecurity_table[];
66484
66485 static struct ctl_table root_table[];
66486 static struct ctl_table_root sysctl_table_root;
66487@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
66488 int sysctl_legacy_va_layout;
66489 #endif
66490
66491+#ifdef CONFIG_PAX_SOFTMODE
66492+static ctl_table pax_table[] = {
66493+ {
66494+ .procname = "softmode",
66495+ .data = &pax_softmode,
66496+ .maxlen = sizeof(unsigned int),
66497+ .mode = 0600,
66498+ .proc_handler = &proc_dointvec,
66499+ },
66500+
66501+ { }
66502+};
66503+#endif
66504+
66505 /* The default sysctl tables: */
66506
66507 static struct ctl_table root_table[] = {
66508@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
66509 #endif
66510
66511 static struct ctl_table kern_table[] = {
66512+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66513+ {
66514+ .procname = "grsecurity",
66515+ .mode = 0500,
66516+ .child = grsecurity_table,
66517+ },
66518+#endif
66519+
66520+#ifdef CONFIG_PAX_SOFTMODE
66521+ {
66522+ .procname = "pax",
66523+ .mode = 0500,
66524+ .child = pax_table,
66525+ },
66526+#endif
66527+
66528 {
66529 .procname = "sched_child_runs_first",
66530 .data = &sysctl_sched_child_runs_first,
66531@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
66532 .data = &modprobe_path,
66533 .maxlen = KMOD_PATH_LEN,
66534 .mode = 0644,
66535- .proc_handler = proc_dostring,
66536+ .proc_handler = proc_dostring_modpriv,
66537 },
66538 {
66539 .procname = "modules_disabled",
66540@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
66541 .extra1 = &zero,
66542 .extra2 = &one,
66543 },
66544+#endif
66545 {
66546 .procname = "kptr_restrict",
66547 .data = &kptr_restrict,
66548 .maxlen = sizeof(int),
66549 .mode = 0644,
66550 .proc_handler = proc_dmesg_restrict,
66551+#ifdef CONFIG_GRKERNSEC_HIDESYM
66552+ .extra1 = &two,
66553+#else
66554 .extra1 = &zero,
66555+#endif
66556 .extra2 = &two,
66557 },
66558-#endif
66559 {
66560 .procname = "ngroups_max",
66561 .data = &ngroups_max,
66562@@ -1216,6 +1258,13 @@ static struct ctl_table vm_table[] = {
66563 .proc_handler = proc_dointvec_minmax,
66564 .extra1 = &zero,
66565 },
66566+ {
66567+ .procname = "heap_stack_gap",
66568+ .data = &sysctl_heap_stack_gap,
66569+ .maxlen = sizeof(sysctl_heap_stack_gap),
66570+ .mode = 0644,
66571+ .proc_handler = proc_doulongvec_minmax,
66572+ },
66573 #else
66574 {
66575 .procname = "nr_trim_pages",
66576@@ -1720,6 +1769,17 @@ static int test_perm(int mode, int op)
66577 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
66578 {
66579 int mode;
66580+ int error;
66581+
66582+ if (table->parent != NULL && table->parent->procname != NULL &&
66583+ table->procname != NULL &&
66584+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66585+ return -EACCES;
66586+ if (gr_handle_chroot_sysctl(op))
66587+ return -EACCES;
66588+ error = gr_handle_sysctl(table, op);
66589+ if (error)
66590+ return error;
66591
66592 if (root->permissions)
66593 mode = root->permissions(root, current->nsproxy, table);
66594@@ -2124,6 +2184,16 @@ int proc_dostring(struct ctl_table *table, int write,
66595 buffer, lenp, ppos);
66596 }
66597
66598+int proc_dostring_modpriv(struct ctl_table *table, int write,
66599+ void __user *buffer, size_t *lenp, loff_t *ppos)
66600+{
66601+ if (write && !capable(CAP_SYS_MODULE))
66602+ return -EPERM;
66603+
66604+ return _proc_do_string(table->data, table->maxlen, write,
66605+ buffer, lenp, ppos);
66606+}
66607+
66608 static size_t proc_skip_spaces(char **buf)
66609 {
66610 size_t ret;
66611@@ -2229,6 +2299,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
66612 len = strlen(tmp);
66613 if (len > *size)
66614 len = *size;
66615+ if (len > sizeof(tmp))
66616+ len = sizeof(tmp);
66617 if (copy_to_user(*buf, tmp, len))
66618 return -EFAULT;
66619 *size -= len;
66620@@ -2545,8 +2617,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
66621 *i = val;
66622 } else {
66623 val = convdiv * (*i) / convmul;
66624- if (!first)
66625+ if (!first) {
66626 err = proc_put_char(&buffer, &left, '\t');
66627+ if (err)
66628+ break;
66629+ }
66630 err = proc_put_long(&buffer, &left, val, false);
66631 if (err)
66632 break;
66633@@ -2941,6 +3016,12 @@ int proc_dostring(struct ctl_table *table, int write,
66634 return -ENOSYS;
66635 }
66636
66637+int proc_dostring_modpriv(struct ctl_table *table, int write,
66638+ void __user *buffer, size_t *lenp, loff_t *ppos)
66639+{
66640+ return -ENOSYS;
66641+}
66642+
66643 int proc_dointvec(struct ctl_table *table, int write,
66644 void __user *buffer, size_t *lenp, loff_t *ppos)
66645 {
66646@@ -2997,6 +3078,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66647 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66648 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66649 EXPORT_SYMBOL(proc_dostring);
66650+EXPORT_SYMBOL(proc_dostring_modpriv);
66651 EXPORT_SYMBOL(proc_doulongvec_minmax);
66652 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66653 EXPORT_SYMBOL(register_sysctl_table);
66654diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
66655index a650694..aaeeb20 100644
66656--- a/kernel/sysctl_binary.c
66657+++ b/kernel/sysctl_binary.c
66658@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
66659 int i;
66660
66661 set_fs(KERNEL_DS);
66662- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66663+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66664 set_fs(old_fs);
66665 if (result < 0)
66666 goto out_kfree;
66667@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
66668 }
66669
66670 set_fs(KERNEL_DS);
66671- result = vfs_write(file, buffer, str - buffer, &pos);
66672+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66673 set_fs(old_fs);
66674 if (result < 0)
66675 goto out_kfree;
66676@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
66677 int i;
66678
66679 set_fs(KERNEL_DS);
66680- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
66681+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
66682 set_fs(old_fs);
66683 if (result < 0)
66684 goto out_kfree;
66685@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
66686 }
66687
66688 set_fs(KERNEL_DS);
66689- result = vfs_write(file, buffer, str - buffer, &pos);
66690+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
66691 set_fs(old_fs);
66692 if (result < 0)
66693 goto out_kfree;
66694@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
66695 int i;
66696
66697 set_fs(KERNEL_DS);
66698- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66699+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66700 set_fs(old_fs);
66701 if (result < 0)
66702 goto out;
66703@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66704 __le16 dnaddr;
66705
66706 set_fs(KERNEL_DS);
66707- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
66708+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
66709 set_fs(old_fs);
66710 if (result < 0)
66711 goto out;
66712@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
66713 le16_to_cpu(dnaddr) & 0x3ff);
66714
66715 set_fs(KERNEL_DS);
66716- result = vfs_write(file, buf, len, &pos);
66717+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
66718 set_fs(old_fs);
66719 if (result < 0)
66720 goto out;
66721diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
66722index 362da65..ab8ef8c 100644
66723--- a/kernel/sysctl_check.c
66724+++ b/kernel/sysctl_check.c
66725@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
66726 set_fail(&fail, table, "Directory with extra2");
66727 } else {
66728 if ((table->proc_handler == proc_dostring) ||
66729+ (table->proc_handler == proc_dostring_modpriv) ||
66730 (table->proc_handler == proc_dointvec) ||
66731 (table->proc_handler == proc_dointvec_minmax) ||
66732 (table->proc_handler == proc_dointvec_jiffies) ||
66733diff --git a/kernel/taskstats.c b/kernel/taskstats.c
66734index e660464..c8b9e67 100644
66735--- a/kernel/taskstats.c
66736+++ b/kernel/taskstats.c
66737@@ -27,9 +27,12 @@
66738 #include <linux/cgroup.h>
66739 #include <linux/fs.h>
66740 #include <linux/file.h>
66741+#include <linux/grsecurity.h>
66742 #include <net/genetlink.h>
66743 #include <linux/atomic.h>
66744
66745+extern int gr_is_taskstats_denied(int pid);
66746+
66747 /*
66748 * Maximum length of a cpumask that can be specified in
66749 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66750@@ -556,6 +559,9 @@ err:
66751
66752 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
66753 {
66754+ if (gr_is_taskstats_denied(current->pid))
66755+ return -EACCES;
66756+
66757 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
66758 return cmd_attr_register_cpumask(info);
66759 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
66760diff --git a/kernel/time.c b/kernel/time.c
66761index 73e416d..cfc6f69 100644
66762--- a/kernel/time.c
66763+++ b/kernel/time.c
66764@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
66765 return error;
66766
66767 if (tz) {
66768+ /* we log in do_settimeofday called below, so don't log twice
66769+ */
66770+ if (!tv)
66771+ gr_log_timechange();
66772+
66773 /* SMP safe, global irq locking makes it work. */
66774 sys_tz = *tz;
66775 update_vsyscall_tz();
66776diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
66777index 8a46f5d..bbe6f9c 100644
66778--- a/kernel/time/alarmtimer.c
66779+++ b/kernel/time/alarmtimer.c
66780@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
66781 struct platform_device *pdev;
66782 int error = 0;
66783 int i;
66784- struct k_clock alarm_clock = {
66785+ static struct k_clock alarm_clock = {
66786 .clock_getres = alarm_clock_getres,
66787 .clock_get = alarm_clock_get,
66788 .timer_create = alarm_timer_create,
66789diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
66790index fd4a7b1..fae5c2a 100644
66791--- a/kernel/time/tick-broadcast.c
66792+++ b/kernel/time/tick-broadcast.c
66793@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
66794 * then clear the broadcast bit.
66795 */
66796 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66797- int cpu = smp_processor_id();
66798+ cpu = smp_processor_id();
66799
66800 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66801 tick_broadcast_clear_oneshot(cpu);
66802diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
66803index 2378413..be455fd 100644
66804--- a/kernel/time/timekeeping.c
66805+++ b/kernel/time/timekeeping.c
66806@@ -14,6 +14,7 @@
66807 #include <linux/init.h>
66808 #include <linux/mm.h>
66809 #include <linux/sched.h>
66810+#include <linux/grsecurity.h>
66811 #include <linux/syscore_ops.h>
66812 #include <linux/clocksource.h>
66813 #include <linux/jiffies.h>
66814@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
66815 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66816 return -EINVAL;
66817
66818+ gr_log_timechange();
66819+
66820 write_seqlock_irqsave(&xtime_lock, flags);
66821
66822 timekeeping_forward_now();
66823diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
66824index 3258455..f35227d 100644
66825--- a/kernel/time/timer_list.c
66826+++ b/kernel/time/timer_list.c
66827@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
66828
66829 static void print_name_offset(struct seq_file *m, void *sym)
66830 {
66831+#ifdef CONFIG_GRKERNSEC_HIDESYM
66832+ SEQ_printf(m, "<%p>", NULL);
66833+#else
66834 char symname[KSYM_NAME_LEN];
66835
66836 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66837 SEQ_printf(m, "<%pK>", sym);
66838 else
66839 SEQ_printf(m, "%s", symname);
66840+#endif
66841 }
66842
66843 static void
66844@@ -112,7 +116,11 @@ next_one:
66845 static void
66846 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66847 {
66848+#ifdef CONFIG_GRKERNSEC_HIDESYM
66849+ SEQ_printf(m, " .base: %p\n", NULL);
66850+#else
66851 SEQ_printf(m, " .base: %pK\n", base);
66852+#endif
66853 SEQ_printf(m, " .index: %d\n",
66854 base->index);
66855 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66856@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
66857 {
66858 struct proc_dir_entry *pe;
66859
66860+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66861+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66862+#else
66863 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66864+#endif
66865 if (!pe)
66866 return -ENOMEM;
66867 return 0;
66868diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
66869index 0b537f2..9e71eca 100644
66870--- a/kernel/time/timer_stats.c
66871+++ b/kernel/time/timer_stats.c
66872@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66873 static unsigned long nr_entries;
66874 static struct entry entries[MAX_ENTRIES];
66875
66876-static atomic_t overflow_count;
66877+static atomic_unchecked_t overflow_count;
66878
66879 /*
66880 * The entries are in a hash-table, for fast lookup:
66881@@ -140,7 +140,7 @@ static void reset_entries(void)
66882 nr_entries = 0;
66883 memset(entries, 0, sizeof(entries));
66884 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66885- atomic_set(&overflow_count, 0);
66886+ atomic_set_unchecked(&overflow_count, 0);
66887 }
66888
66889 static struct entry *alloc_entry(void)
66890@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66891 if (likely(entry))
66892 entry->count++;
66893 else
66894- atomic_inc(&overflow_count);
66895+ atomic_inc_unchecked(&overflow_count);
66896
66897 out_unlock:
66898 raw_spin_unlock_irqrestore(lock, flags);
66899@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
66900
66901 static void print_name_offset(struct seq_file *m, unsigned long addr)
66902 {
66903+#ifdef CONFIG_GRKERNSEC_HIDESYM
66904+ seq_printf(m, "<%p>", NULL);
66905+#else
66906 char symname[KSYM_NAME_LEN];
66907
66908 if (lookup_symbol_name(addr, symname) < 0)
66909 seq_printf(m, "<%p>", (void *)addr);
66910 else
66911 seq_printf(m, "%s", symname);
66912+#endif
66913 }
66914
66915 static int tstats_show(struct seq_file *m, void *v)
66916@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
66917
66918 seq_puts(m, "Timer Stats Version: v0.2\n");
66919 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66920- if (atomic_read(&overflow_count))
66921+ if (atomic_read_unchecked(&overflow_count))
66922 seq_printf(m, "Overflow: %d entries\n",
66923- atomic_read(&overflow_count));
66924+ atomic_read_unchecked(&overflow_count));
66925
66926 for (i = 0; i < nr_entries; i++) {
66927 entry = entries + i;
66928@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
66929 {
66930 struct proc_dir_entry *pe;
66931
66932+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66933+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66934+#else
66935 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66936+#endif
66937 if (!pe)
66938 return -ENOMEM;
66939 return 0;
66940diff --git a/kernel/timer.c b/kernel/timer.c
66941index 9c3c62b..441690e 100644
66942--- a/kernel/timer.c
66943+++ b/kernel/timer.c
66944@@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
66945 /*
66946 * This function runs timers and the timer-tq in bottom half context.
66947 */
66948-static void run_timer_softirq(struct softirq_action *h)
66949+static void run_timer_softirq(void)
66950 {
66951 struct tvec_base *base = __this_cpu_read(tvec_bases);
66952
66953diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
66954index 16fc34a..efd8bb8 100644
66955--- a/kernel/trace/blktrace.c
66956+++ b/kernel/trace/blktrace.c
66957@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
66958 struct blk_trace *bt = filp->private_data;
66959 char buf[16];
66960
66961- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66962+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66963
66964 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66965 }
66966@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
66967 return 1;
66968
66969 bt = buf->chan->private_data;
66970- atomic_inc(&bt->dropped);
66971+ atomic_inc_unchecked(&bt->dropped);
66972 return 0;
66973 }
66974
66975@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
66976
66977 bt->dir = dir;
66978 bt->dev = dev;
66979- atomic_set(&bt->dropped, 0);
66980+ atomic_set_unchecked(&bt->dropped, 0);
66981
66982 ret = -EIO;
66983 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66984diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
66985index 25b4f4d..6f4772d 100644
66986--- a/kernel/trace/ftrace.c
66987+++ b/kernel/trace/ftrace.c
66988@@ -1587,12 +1587,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
66989 if (unlikely(ftrace_disabled))
66990 return 0;
66991
66992+ ret = ftrace_arch_code_modify_prepare();
66993+ FTRACE_WARN_ON(ret);
66994+ if (ret)
66995+ return 0;
66996+
66997 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66998+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66999 if (ret) {
67000 ftrace_bug(ret, ip);
67001- return 0;
67002 }
67003- return 1;
67004+ return ret ? 0 : 1;
67005 }
67006
67007 /*
67008@@ -2608,7 +2613,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
67009
67010 int
67011 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
67012- void *data)
67013+ void *data)
67014 {
67015 struct ftrace_func_probe *entry;
67016 struct ftrace_page *pg;
67017diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
67018index f2bd275..adaf3a2 100644
67019--- a/kernel/trace/trace.c
67020+++ b/kernel/trace/trace.c
67021@@ -4201,10 +4201,9 @@ static const struct file_operations tracing_dyn_info_fops = {
67022 };
67023 #endif
67024
67025-static struct dentry *d_tracer;
67026-
67027 struct dentry *tracing_init_dentry(void)
67028 {
67029+ static struct dentry *d_tracer;
67030 static int once;
67031
67032 if (d_tracer)
67033@@ -4224,10 +4223,9 @@ struct dentry *tracing_init_dentry(void)
67034 return d_tracer;
67035 }
67036
67037-static struct dentry *d_percpu;
67038-
67039 struct dentry *tracing_dentry_percpu(void)
67040 {
67041+ static struct dentry *d_percpu;
67042 static int once;
67043 struct dentry *d_tracer;
67044
67045diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
67046index c212a7f..7b02394 100644
67047--- a/kernel/trace/trace_events.c
67048+++ b/kernel/trace/trace_events.c
67049@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
67050 struct ftrace_module_file_ops {
67051 struct list_head list;
67052 struct module *mod;
67053- struct file_operations id;
67054- struct file_operations enable;
67055- struct file_operations format;
67056- struct file_operations filter;
67057 };
67058
67059 static struct ftrace_module_file_ops *
67060@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
67061
67062 file_ops->mod = mod;
67063
67064- file_ops->id = ftrace_event_id_fops;
67065- file_ops->id.owner = mod;
67066-
67067- file_ops->enable = ftrace_enable_fops;
67068- file_ops->enable.owner = mod;
67069-
67070- file_ops->filter = ftrace_event_filter_fops;
67071- file_ops->filter.owner = mod;
67072-
67073- file_ops->format = ftrace_event_format_fops;
67074- file_ops->format.owner = mod;
67075+ pax_open_kernel();
67076+ *(void **)&mod->trace_id.owner = mod;
67077+ *(void **)&mod->trace_enable.owner = mod;
67078+ *(void **)&mod->trace_filter.owner = mod;
67079+ *(void **)&mod->trace_format.owner = mod;
67080+ pax_close_kernel();
67081
67082 list_add(&file_ops->list, &ftrace_module_file_list);
67083
67084@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
67085
67086 for_each_event(call, start, end) {
67087 __trace_add_event_call(*call, mod,
67088- &file_ops->id, &file_ops->enable,
67089- &file_ops->filter, &file_ops->format);
67090+ &mod->trace_id, &mod->trace_enable,
67091+ &mod->trace_filter, &mod->trace_format);
67092 }
67093 }
67094
67095diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
67096index 00d527c..7c5b1a3 100644
67097--- a/kernel/trace/trace_kprobe.c
67098+++ b/kernel/trace/trace_kprobe.c
67099@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67100 long ret;
67101 int maxlen = get_rloc_len(*(u32 *)dest);
67102 u8 *dst = get_rloc_data(dest);
67103- u8 *src = addr;
67104+ const u8 __user *src = (const u8 __force_user *)addr;
67105 mm_segment_t old_fs = get_fs();
67106 if (!maxlen)
67107 return;
67108@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67109 pagefault_disable();
67110 do
67111 ret = __copy_from_user_inatomic(dst++, src++, 1);
67112- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
67113+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
67114 dst[-1] = '\0';
67115 pagefault_enable();
67116 set_fs(old_fs);
67117@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
67118 ((u8 *)get_rloc_data(dest))[0] = '\0';
67119 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
67120 } else
67121- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
67122+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
67123 get_rloc_offs(*(u32 *)dest));
67124 }
67125 /* Return the length of string -- including null terminal byte */
67126@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
67127 set_fs(KERNEL_DS);
67128 pagefault_disable();
67129 do {
67130- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
67131+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
67132 len++;
67133 } while (c && ret == 0 && len < MAX_STRING_SIZE);
67134 pagefault_enable();
67135diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
67136index fd3c8aa..5f324a6 100644
67137--- a/kernel/trace/trace_mmiotrace.c
67138+++ b/kernel/trace/trace_mmiotrace.c
67139@@ -24,7 +24,7 @@ struct header_iter {
67140 static struct trace_array *mmio_trace_array;
67141 static bool overrun_detected;
67142 static unsigned long prev_overruns;
67143-static atomic_t dropped_count;
67144+static atomic_unchecked_t dropped_count;
67145
67146 static void mmio_reset_data(struct trace_array *tr)
67147 {
67148@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
67149
67150 static unsigned long count_overruns(struct trace_iterator *iter)
67151 {
67152- unsigned long cnt = atomic_xchg(&dropped_count, 0);
67153+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67154 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67155
67156 if (over > prev_overruns)
67157@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
67158 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67159 sizeof(*entry), 0, pc);
67160 if (!event) {
67161- atomic_inc(&dropped_count);
67162+ atomic_inc_unchecked(&dropped_count);
67163 return;
67164 }
67165 entry = ring_buffer_event_data(event);
67166@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
67167 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67168 sizeof(*entry), 0, pc);
67169 if (!event) {
67170- atomic_inc(&dropped_count);
67171+ atomic_inc_unchecked(&dropped_count);
67172 return;
67173 }
67174 entry = ring_buffer_event_data(event);
67175diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
67176index 5199930..26c73a0 100644
67177--- a/kernel/trace/trace_output.c
67178+++ b/kernel/trace/trace_output.c
67179@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
67180
67181 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67182 if (!IS_ERR(p)) {
67183- p = mangle_path(s->buffer + s->len, p, "\n");
67184+ p = mangle_path(s->buffer + s->len, p, "\n\\");
67185 if (p) {
67186 s->len = p - s->buffer;
67187 return 1;
67188diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
67189index 77575b3..6e623d1 100644
67190--- a/kernel/trace/trace_stack.c
67191+++ b/kernel/trace/trace_stack.c
67192@@ -50,7 +50,7 @@ static inline void check_stack(void)
67193 return;
67194
67195 /* we do not handle interrupt stacks yet */
67196- if (!object_is_on_stack(&this_size))
67197+ if (!object_starts_on_stack(&this_size))
67198 return;
67199
67200 local_irq_save(flags);
67201diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
67202index 209b379..7f76423 100644
67203--- a/kernel/trace/trace_workqueue.c
67204+++ b/kernel/trace/trace_workqueue.c
67205@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
67206 int cpu;
67207 pid_t pid;
67208 /* Can be inserted from interrupt or user context, need to be atomic */
67209- atomic_t inserted;
67210+ atomic_unchecked_t inserted;
67211 /*
67212 * Don't need to be atomic, works are serialized in a single workqueue thread
67213 * on a single CPU.
67214@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
67215 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67216 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67217 if (node->pid == wq_thread->pid) {
67218- atomic_inc(&node->inserted);
67219+ atomic_inc_unchecked(&node->inserted);
67220 goto found;
67221 }
67222 }
67223@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
67224 tsk = get_pid_task(pid, PIDTYPE_PID);
67225 if (tsk) {
67226 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67227- atomic_read(&cws->inserted), cws->executed,
67228+ atomic_read_unchecked(&cws->inserted), cws->executed,
67229 tsk->comm);
67230 put_task_struct(tsk);
67231 }
67232diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
67233index 82928f5..92da771 100644
67234--- a/lib/Kconfig.debug
67235+++ b/lib/Kconfig.debug
67236@@ -1103,6 +1103,7 @@ config LATENCYTOP
67237 depends on DEBUG_KERNEL
67238 depends on STACKTRACE_SUPPORT
67239 depends on PROC_FS
67240+ depends on !GRKERNSEC_HIDESYM
67241 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
67242 select KALLSYMS
67243 select KALLSYMS_ALL
67244diff --git a/lib/bitmap.c b/lib/bitmap.c
67245index 0d4a127..33a06c7 100644
67246--- a/lib/bitmap.c
67247+++ b/lib/bitmap.c
67248@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
67249 {
67250 int c, old_c, totaldigits, ndigits, nchunks, nbits;
67251 u32 chunk;
67252- const char __user __force *ubuf = (const char __user __force *)buf;
67253+ const char __user *ubuf = (const char __force_user *)buf;
67254
67255 bitmap_zero(maskp, nmaskbits);
67256
67257@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
67258 {
67259 if (!access_ok(VERIFY_READ, ubuf, ulen))
67260 return -EFAULT;
67261- return __bitmap_parse((const char __force *)ubuf,
67262+ return __bitmap_parse((const char __force_kernel *)ubuf,
67263 ulen, 1, maskp, nmaskbits);
67264
67265 }
67266@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
67267 {
67268 unsigned a, b;
67269 int c, old_c, totaldigits;
67270- const char __user __force *ubuf = (const char __user __force *)buf;
67271+ const char __user *ubuf = (const char __force_user *)buf;
67272 int exp_digit, in_range;
67273
67274 totaldigits = c = 0;
67275@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
67276 {
67277 if (!access_ok(VERIFY_READ, ubuf, ulen))
67278 return -EFAULT;
67279- return __bitmap_parselist((const char __force *)ubuf,
67280+ return __bitmap_parselist((const char __force_kernel *)ubuf,
67281 ulen, 1, maskp, nmaskbits);
67282 }
67283 EXPORT_SYMBOL(bitmap_parselist_user);
67284diff --git a/lib/bug.c b/lib/bug.c
67285index 1955209..cbbb2ad 100644
67286--- a/lib/bug.c
67287+++ b/lib/bug.c
67288@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
67289 return BUG_TRAP_TYPE_NONE;
67290
67291 bug = find_bug(bugaddr);
67292+ if (!bug)
67293+ return BUG_TRAP_TYPE_NONE;
67294
67295 file = NULL;
67296 line = 0;
67297diff --git a/lib/debugobjects.c b/lib/debugobjects.c
67298index a78b7c6..2c73084 100644
67299--- a/lib/debugobjects.c
67300+++ b/lib/debugobjects.c
67301@@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
67302 if (limit > 4)
67303 return;
67304
67305- is_on_stack = object_is_on_stack(addr);
67306+ is_on_stack = object_starts_on_stack(addr);
67307 if (is_on_stack == onstack)
67308 return;
67309
67310diff --git a/lib/devres.c b/lib/devres.c
67311index 7c0e953..f642b5c 100644
67312--- a/lib/devres.c
67313+++ b/lib/devres.c
67314@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
67315 void devm_iounmap(struct device *dev, void __iomem *addr)
67316 {
67317 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
67318- (void *)addr));
67319+ (void __force *)addr));
67320 iounmap(addr);
67321 }
67322 EXPORT_SYMBOL(devm_iounmap);
67323@@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
67324 {
67325 ioport_unmap(addr);
67326 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
67327- devm_ioport_map_match, (void *)addr));
67328+ devm_ioport_map_match, (void __force *)addr));
67329 }
67330 EXPORT_SYMBOL(devm_ioport_unmap);
67331
67332diff --git a/lib/dma-debug.c b/lib/dma-debug.c
67333index fea790a..ebb0e82 100644
67334--- a/lib/dma-debug.c
67335+++ b/lib/dma-debug.c
67336@@ -925,7 +925,7 @@ out:
67337
67338 static void check_for_stack(struct device *dev, void *addr)
67339 {
67340- if (object_is_on_stack(addr))
67341+ if (object_starts_on_stack(addr))
67342 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67343 "stack [addr=%p]\n", addr);
67344 }
67345diff --git a/lib/extable.c b/lib/extable.c
67346index 4cac81e..63e9b8f 100644
67347--- a/lib/extable.c
67348+++ b/lib/extable.c
67349@@ -13,6 +13,7 @@
67350 #include <linux/init.h>
67351 #include <linux/sort.h>
67352 #include <asm/uaccess.h>
67353+#include <asm/pgtable.h>
67354
67355 #ifndef ARCH_HAS_SORT_EXTABLE
67356 /*
67357@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
67358 void sort_extable(struct exception_table_entry *start,
67359 struct exception_table_entry *finish)
67360 {
67361+ pax_open_kernel();
67362 sort(start, finish - start, sizeof(struct exception_table_entry),
67363 cmp_ex, NULL);
67364+ pax_close_kernel();
67365 }
67366
67367 #ifdef CONFIG_MODULES
67368diff --git a/lib/inflate.c b/lib/inflate.c
67369index 013a761..c28f3fc 100644
67370--- a/lib/inflate.c
67371+++ b/lib/inflate.c
67372@@ -269,7 +269,7 @@ static void free(void *where)
67373 malloc_ptr = free_mem_ptr;
67374 }
67375 #else
67376-#define malloc(a) kmalloc(a, GFP_KERNEL)
67377+#define malloc(a) kmalloc((a), GFP_KERNEL)
67378 #define free(a) kfree(a)
67379 #endif
67380
67381diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
67382index bd2bea9..6b3c95e 100644
67383--- a/lib/is_single_threaded.c
67384+++ b/lib/is_single_threaded.c
67385@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
67386 struct task_struct *p, *t;
67387 bool ret;
67388
67389+ if (!mm)
67390+ return true;
67391+
67392 if (atomic_read(&task->signal->live) != 1)
67393 return false;
67394
67395diff --git a/lib/kref.c b/lib/kref.c
67396index 3efb882..8492f4c 100644
67397--- a/lib/kref.c
67398+++ b/lib/kref.c
67399@@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
67400 */
67401 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67402 {
67403- WARN_ON(release == NULL);
67404+ BUG_ON(release == NULL);
67405 WARN_ON(release == (void (*)(struct kref *))kfree);
67406
67407 if (atomic_dec_and_test(&kref->refcount)) {
67408diff --git a/lib/radix-tree.c b/lib/radix-tree.c
67409index d9df745..e73c2fe 100644
67410--- a/lib/radix-tree.c
67411+++ b/lib/radix-tree.c
67412@@ -80,7 +80,7 @@ struct radix_tree_preload {
67413 int nr;
67414 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67415 };
67416-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67417+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67418
67419 static inline void *ptr_to_indirect(void *ptr)
67420 {
67421diff --git a/lib/vsprintf.c b/lib/vsprintf.c
67422index 993599e..84dc70e 100644
67423--- a/lib/vsprintf.c
67424+++ b/lib/vsprintf.c
67425@@ -16,6 +16,9 @@
67426 * - scnprintf and vscnprintf
67427 */
67428
67429+#ifdef CONFIG_GRKERNSEC_HIDESYM
67430+#define __INCLUDED_BY_HIDESYM 1
67431+#endif
67432 #include <stdarg.h>
67433 #include <linux/module.h>
67434 #include <linux/types.h>
67435@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
67436 char sym[KSYM_SYMBOL_LEN];
67437 if (ext == 'B')
67438 sprint_backtrace(sym, value);
67439- else if (ext != 'f' && ext != 's')
67440+ else if (ext != 'f' && ext != 's' && ext != 'a')
67441 sprint_symbol(sym, value);
67442 else
67443 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67444@@ -777,7 +780,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
67445 return string(buf, end, uuid, spec);
67446 }
67447
67448+#ifdef CONFIG_GRKERNSEC_HIDESYM
67449+int kptr_restrict __read_mostly = 2;
67450+#else
67451 int kptr_restrict __read_mostly;
67452+#endif
67453
67454 /*
67455 * Show a '%p' thing. A kernel extension is that the '%p' is followed
67456@@ -791,6 +798,8 @@ int kptr_restrict __read_mostly;
67457 * - 'S' For symbolic direct pointers with offset
67458 * - 's' For symbolic direct pointers without offset
67459 * - 'B' For backtraced symbolic direct pointers with offset
67460+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67461+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67462 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
67463 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
67464 * - 'M' For a 6-byte MAC address, it prints the address in the
67465@@ -835,12 +844,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67466 {
67467 if (!ptr && *fmt != 'K') {
67468 /*
67469- * Print (null) with the same width as a pointer so it makes
67470+ * Print (nil) with the same width as a pointer so it makes
67471 * tabular output look nice.
67472 */
67473 if (spec.field_width == -1)
67474 spec.field_width = 2 * sizeof(void *);
67475- return string(buf, end, "(null)", spec);
67476+ return string(buf, end, "(nil)", spec);
67477 }
67478
67479 switch (*fmt) {
67480@@ -850,6 +859,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
67481 /* Fallthrough */
67482 case 'S':
67483 case 's':
67484+#ifdef CONFIG_GRKERNSEC_HIDESYM
67485+ break;
67486+#else
67487+ return symbol_string(buf, end, ptr, spec, *fmt);
67488+#endif
67489+ case 'A':
67490+ case 'a':
67491 case 'B':
67492 return symbol_string(buf, end, ptr, spec, *fmt);
67493 case 'R':
67494@@ -1608,11 +1624,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67495 typeof(type) value; \
67496 if (sizeof(type) == 8) { \
67497 args = PTR_ALIGN(args, sizeof(u32)); \
67498- *(u32 *)&value = *(u32 *)args; \
67499- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67500+ *(u32 *)&value = *(const u32 *)args; \
67501+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67502 } else { \
67503 args = PTR_ALIGN(args, sizeof(type)); \
67504- value = *(typeof(type) *)args; \
67505+ value = *(const typeof(type) *)args; \
67506 } \
67507 args += sizeof(type); \
67508 value; \
67509@@ -1675,7 +1691,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
67510 case FORMAT_TYPE_STR: {
67511 const char *str_arg = args;
67512 args += strlen(str_arg) + 1;
67513- str = string(str, end, (char *)str_arg, spec);
67514+ str = string(str, end, str_arg, spec);
67515 break;
67516 }
67517
67518diff --git a/localversion-grsec b/localversion-grsec
67519new file mode 100644
67520index 0000000..7cd6065
67521--- /dev/null
67522+++ b/localversion-grsec
67523@@ -0,0 +1 @@
67524+-grsec
67525diff --git a/mm/Kconfig b/mm/Kconfig
67526index 011b110..b492af2 100644
67527--- a/mm/Kconfig
67528+++ b/mm/Kconfig
67529@@ -241,10 +241,10 @@ config KSM
67530 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
67531
67532 config DEFAULT_MMAP_MIN_ADDR
67533- int "Low address space to protect from user allocation"
67534+ int "Low address space to protect from user allocation"
67535 depends on MMU
67536- default 4096
67537- help
67538+ default 65536
67539+ help
67540 This is the portion of low virtual memory which should be protected
67541 from userspace allocation. Keeping a user from writing to low pages
67542 can help reduce the impact of kernel NULL pointer bugs.
67543diff --git a/mm/filemap.c b/mm/filemap.c
67544index 90286a4..f441caa 100644
67545--- a/mm/filemap.c
67546+++ b/mm/filemap.c
67547@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
67548 struct address_space *mapping = file->f_mapping;
67549
67550 if (!mapping->a_ops->readpage)
67551- return -ENOEXEC;
67552+ return -ENODEV;
67553 file_accessed(file);
67554 vma->vm_ops = &generic_file_vm_ops;
67555 vma->vm_flags |= VM_CAN_NONLINEAR;
67556@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
67557 *pos = i_size_read(inode);
67558
67559 if (limit != RLIM_INFINITY) {
67560+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67561 if (*pos >= limit) {
67562 send_sig(SIGXFSZ, current, 0);
67563 return -EFBIG;
67564diff --git a/mm/fremap.c b/mm/fremap.c
67565index 9ed4fd4..c42648d 100644
67566--- a/mm/fremap.c
67567+++ b/mm/fremap.c
67568@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
67569 retry:
67570 vma = find_vma(mm, start);
67571
67572+#ifdef CONFIG_PAX_SEGMEXEC
67573+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67574+ goto out;
67575+#endif
67576+
67577 /*
67578 * Make sure the vma is shared, that it supports prefaulting,
67579 * and that the remapped range is valid and fully within
67580diff --git a/mm/highmem.c b/mm/highmem.c
67581index 57d82c6..e9e0552 100644
67582--- a/mm/highmem.c
67583+++ b/mm/highmem.c
67584@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
67585 * So no dangers, even with speculative execution.
67586 */
67587 page = pte_page(pkmap_page_table[i]);
67588+ pax_open_kernel();
67589 pte_clear(&init_mm, (unsigned long)page_address(page),
67590 &pkmap_page_table[i]);
67591-
67592+ pax_close_kernel();
67593 set_page_address(page, NULL);
67594 need_flush = 1;
67595 }
67596@@ -186,9 +187,11 @@ start:
67597 }
67598 }
67599 vaddr = PKMAP_ADDR(last_pkmap_nr);
67600+
67601+ pax_open_kernel();
67602 set_pte_at(&init_mm, vaddr,
67603 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67604-
67605+ pax_close_kernel();
67606 pkmap_count[last_pkmap_nr] = 1;
67607 set_page_address(page, (void *)vaddr);
67608
67609diff --git a/mm/huge_memory.c b/mm/huge_memory.c
67610index 36b3d98..584cb54 100644
67611--- a/mm/huge_memory.c
67612+++ b/mm/huge_memory.c
67613@@ -703,7 +703,7 @@ out:
67614 * run pte_offset_map on the pmd, if an huge pmd could
67615 * materialize from under us from a different thread.
67616 */
67617- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
67618+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
67619 return VM_FAULT_OOM;
67620 /* if an huge pmd materialized from under us just retry later */
67621 if (unlikely(pmd_trans_huge(*pmd)))
67622diff --git a/mm/hugetlb.c b/mm/hugetlb.c
67623index 2316840..b418671 100644
67624--- a/mm/hugetlb.c
67625+++ b/mm/hugetlb.c
67626@@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
67627 return 1;
67628 }
67629
67630+#ifdef CONFIG_PAX_SEGMEXEC
67631+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67632+{
67633+ struct mm_struct *mm = vma->vm_mm;
67634+ struct vm_area_struct *vma_m;
67635+ unsigned long address_m;
67636+ pte_t *ptep_m;
67637+
67638+ vma_m = pax_find_mirror_vma(vma);
67639+ if (!vma_m)
67640+ return;
67641+
67642+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67643+ address_m = address + SEGMEXEC_TASK_SIZE;
67644+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67645+ get_page(page_m);
67646+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
67647+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67648+}
67649+#endif
67650+
67651 /*
67652 * Hugetlb_cow() should be called with page lock of the original hugepage held.
67653 */
67654@@ -2450,6 +2471,11 @@ retry_avoidcopy:
67655 make_huge_pte(vma, new_page, 1));
67656 page_remove_rmap(old_page);
67657 hugepage_add_new_anon_rmap(new_page, vma, address);
67658+
67659+#ifdef CONFIG_PAX_SEGMEXEC
67660+ pax_mirror_huge_pte(vma, address, new_page);
67661+#endif
67662+
67663 /* Make the old page be freed below */
67664 new_page = old_page;
67665 mmu_notifier_invalidate_range_end(mm,
67666@@ -2601,6 +2627,10 @@ retry:
67667 && (vma->vm_flags & VM_SHARED)));
67668 set_huge_pte_at(mm, address, ptep, new_pte);
67669
67670+#ifdef CONFIG_PAX_SEGMEXEC
67671+ pax_mirror_huge_pte(vma, address, page);
67672+#endif
67673+
67674 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67675 /* Optimization, do the COW without a second fault */
67676 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67677@@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67678 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67679 struct hstate *h = hstate_vma(vma);
67680
67681+#ifdef CONFIG_PAX_SEGMEXEC
67682+ struct vm_area_struct *vma_m;
67683+#endif
67684+
67685 ptep = huge_pte_offset(mm, address);
67686 if (ptep) {
67687 entry = huge_ptep_get(ptep);
67688@@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
67689 VM_FAULT_SET_HINDEX(h - hstates);
67690 }
67691
67692+#ifdef CONFIG_PAX_SEGMEXEC
67693+ vma_m = pax_find_mirror_vma(vma);
67694+ if (vma_m) {
67695+ unsigned long address_m;
67696+
67697+ if (vma->vm_start > vma_m->vm_start) {
67698+ address_m = address;
67699+ address -= SEGMEXEC_TASK_SIZE;
67700+ vma = vma_m;
67701+ h = hstate_vma(vma);
67702+ } else
67703+ address_m = address + SEGMEXEC_TASK_SIZE;
67704+
67705+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67706+ return VM_FAULT_OOM;
67707+ address_m &= HPAGE_MASK;
67708+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67709+ }
67710+#endif
67711+
67712 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67713 if (!ptep)
67714 return VM_FAULT_OOM;
67715diff --git a/mm/internal.h b/mm/internal.h
67716index 2189af4..f2ca332 100644
67717--- a/mm/internal.h
67718+++ b/mm/internal.h
67719@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
67720 * in mm/page_alloc.c
67721 */
67722 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67723+extern void free_compound_page(struct page *page);
67724 extern void prep_compound_page(struct page *page, unsigned long order);
67725 #ifdef CONFIG_MEMORY_FAILURE
67726 extern bool is_free_buddy_page(struct page *page);
67727diff --git a/mm/kmemleak.c b/mm/kmemleak.c
67728index f3b2a00..61da94d 100644
67729--- a/mm/kmemleak.c
67730+++ b/mm/kmemleak.c
67731@@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
67732
67733 for (i = 0; i < object->trace_len; i++) {
67734 void *ptr = (void *)object->trace[i];
67735- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67736+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67737 }
67738 }
67739
67740diff --git a/mm/maccess.c b/mm/maccess.c
67741index d53adf9..03a24bf 100644
67742--- a/mm/maccess.c
67743+++ b/mm/maccess.c
67744@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
67745 set_fs(KERNEL_DS);
67746 pagefault_disable();
67747 ret = __copy_from_user_inatomic(dst,
67748- (__force const void __user *)src, size);
67749+ (const void __force_user *)src, size);
67750 pagefault_enable();
67751 set_fs(old_fs);
67752
67753@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
67754
67755 set_fs(KERNEL_DS);
67756 pagefault_disable();
67757- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
67758+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
67759 pagefault_enable();
67760 set_fs(old_fs);
67761
67762diff --git a/mm/madvise.c b/mm/madvise.c
67763index 74bf193..feb6fd3 100644
67764--- a/mm/madvise.c
67765+++ b/mm/madvise.c
67766@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
67767 pgoff_t pgoff;
67768 unsigned long new_flags = vma->vm_flags;
67769
67770+#ifdef CONFIG_PAX_SEGMEXEC
67771+ struct vm_area_struct *vma_m;
67772+#endif
67773+
67774 switch (behavior) {
67775 case MADV_NORMAL:
67776 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67777@@ -110,6 +114,13 @@ success:
67778 /*
67779 * vm_flags is protected by the mmap_sem held in write mode.
67780 */
67781+
67782+#ifdef CONFIG_PAX_SEGMEXEC
67783+ vma_m = pax_find_mirror_vma(vma);
67784+ if (vma_m)
67785+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67786+#endif
67787+
67788 vma->vm_flags = new_flags;
67789
67790 out:
67791@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67792 struct vm_area_struct ** prev,
67793 unsigned long start, unsigned long end)
67794 {
67795+
67796+#ifdef CONFIG_PAX_SEGMEXEC
67797+ struct vm_area_struct *vma_m;
67798+#endif
67799+
67800 *prev = vma;
67801 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67802 return -EINVAL;
67803@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
67804 zap_page_range(vma, start, end - start, &details);
67805 } else
67806 zap_page_range(vma, start, end - start, NULL);
67807+
67808+#ifdef CONFIG_PAX_SEGMEXEC
67809+ vma_m = pax_find_mirror_vma(vma);
67810+ if (vma_m) {
67811+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67812+ struct zap_details details = {
67813+ .nonlinear_vma = vma_m,
67814+ .last_index = ULONG_MAX,
67815+ };
67816+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67817+ } else
67818+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67819+ }
67820+#endif
67821+
67822 return 0;
67823 }
67824
67825@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
67826 if (end < start)
67827 goto out;
67828
67829+#ifdef CONFIG_PAX_SEGMEXEC
67830+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67831+ if (end > SEGMEXEC_TASK_SIZE)
67832+ goto out;
67833+ } else
67834+#endif
67835+
67836+ if (end > TASK_SIZE)
67837+ goto out;
67838+
67839 error = 0;
67840 if (end == start)
67841 goto out;
67842diff --git a/mm/memory-failure.c b/mm/memory-failure.c
67843index 06d3479..0778eef 100644
67844--- a/mm/memory-failure.c
67845+++ b/mm/memory-failure.c
67846@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
67847
67848 int sysctl_memory_failure_recovery __read_mostly = 1;
67849
67850-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67851+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
67852
67853 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
67854
67855@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
67856 si.si_signo = SIGBUS;
67857 si.si_errno = 0;
67858 si.si_code = BUS_MCEERR_AO;
67859- si.si_addr = (void *)addr;
67860+ si.si_addr = (void __user *)addr;
67861 #ifdef __ARCH_SI_TRAPNO
67862 si.si_trapno = trapno;
67863 #endif
67864@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67865 }
67866
67867 nr_pages = 1 << compound_trans_order(hpage);
67868- atomic_long_add(nr_pages, &mce_bad_pages);
67869+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
67870
67871 /*
67872 * We need/can do nothing about count=0 pages.
67873@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67874 if (!PageHWPoison(hpage)
67875 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
67876 || (p != hpage && TestSetPageHWPoison(hpage))) {
67877- atomic_long_sub(nr_pages, &mce_bad_pages);
67878+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67879 return 0;
67880 }
67881 set_page_hwpoison_huge_page(hpage);
67882@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
67883 }
67884 if (hwpoison_filter(p)) {
67885 if (TestClearPageHWPoison(p))
67886- atomic_long_sub(nr_pages, &mce_bad_pages);
67887+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67888 unlock_page(hpage);
67889 put_page(hpage);
67890 return 0;
67891@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
67892 return 0;
67893 }
67894 if (TestClearPageHWPoison(p))
67895- atomic_long_sub(nr_pages, &mce_bad_pages);
67896+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67897 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
67898 return 0;
67899 }
67900@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
67901 */
67902 if (TestClearPageHWPoison(page)) {
67903 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
67904- atomic_long_sub(nr_pages, &mce_bad_pages);
67905+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
67906 freeit = 1;
67907 if (PageHuge(page))
67908 clear_page_hwpoison_huge_page(page);
67909@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
67910 }
67911 done:
67912 if (!PageHWPoison(hpage))
67913- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
67914+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
67915 set_page_hwpoison_huge_page(hpage);
67916 dequeue_hwpoisoned_huge_page(hpage);
67917 /* keep elevated page count for bad page */
67918@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
67919 return ret;
67920
67921 done:
67922- atomic_long_add(1, &mce_bad_pages);
67923+ atomic_long_add_unchecked(1, &mce_bad_pages);
67924 SetPageHWPoison(page);
67925 /* keep elevated page count for bad page */
67926 return ret;
67927diff --git a/mm/memory.c b/mm/memory.c
67928index 829d437..3d3926a 100644
67929--- a/mm/memory.c
67930+++ b/mm/memory.c
67931@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
67932 return;
67933
67934 pmd = pmd_offset(pud, start);
67935+
67936+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67937 pud_clear(pud);
67938 pmd_free_tlb(tlb, pmd, start);
67939+#endif
67940+
67941 }
67942
67943 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67944@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67945 if (end - 1 > ceiling - 1)
67946 return;
67947
67948+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67949 pud = pud_offset(pgd, start);
67950 pgd_clear(pgd);
67951 pud_free_tlb(tlb, pud, start);
67952+#endif
67953+
67954 }
67955
67956 /*
67957@@ -1566,12 +1573,6 @@ no_page_table:
67958 return page;
67959 }
67960
67961-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
67962-{
67963- return stack_guard_page_start(vma, addr) ||
67964- stack_guard_page_end(vma, addr+PAGE_SIZE);
67965-}
67966-
67967 /**
67968 * __get_user_pages() - pin user pages in memory
67969 * @tsk: task_struct of target task
67970@@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67971 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67972 i = 0;
67973
67974- do {
67975+ while (nr_pages) {
67976 struct vm_area_struct *vma;
67977
67978- vma = find_extend_vma(mm, start);
67979+ vma = find_vma(mm, start);
67980 if (!vma && in_gate_area(mm, start)) {
67981 unsigned long pg = start & PAGE_MASK;
67982 pgd_t *pgd;
67983@@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67984 goto next_page;
67985 }
67986
67987- if (!vma ||
67988+ if (!vma || start < vma->vm_start ||
67989 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67990 !(vm_flags & vma->vm_flags))
67991 return i ? : -EFAULT;
67992@@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
67993 int ret;
67994 unsigned int fault_flags = 0;
67995
67996- /* For mlock, just skip the stack guard page. */
67997- if (foll_flags & FOLL_MLOCK) {
67998- if (stack_guard_page(vma, start))
67999- goto next_page;
68000- }
68001 if (foll_flags & FOLL_WRITE)
68002 fault_flags |= FAULT_FLAG_WRITE;
68003 if (nonblocking)
68004@@ -1800,7 +1796,7 @@ next_page:
68005 start += PAGE_SIZE;
68006 nr_pages--;
68007 } while (nr_pages && start < vma->vm_end);
68008- } while (nr_pages);
68009+ }
68010 return i;
68011 }
68012 EXPORT_SYMBOL(__get_user_pages);
68013@@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
68014 page_add_file_rmap(page);
68015 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68016
68017+#ifdef CONFIG_PAX_SEGMEXEC
68018+ pax_mirror_file_pte(vma, addr, page, ptl);
68019+#endif
68020+
68021 retval = 0;
68022 pte_unmap_unlock(pte, ptl);
68023 return retval;
68024@@ -2041,10 +2041,22 @@ out:
68025 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68026 struct page *page)
68027 {
68028+
68029+#ifdef CONFIG_PAX_SEGMEXEC
68030+ struct vm_area_struct *vma_m;
68031+#endif
68032+
68033 if (addr < vma->vm_start || addr >= vma->vm_end)
68034 return -EFAULT;
68035 if (!page_count(page))
68036 return -EINVAL;
68037+
68038+#ifdef CONFIG_PAX_SEGMEXEC
68039+ vma_m = pax_find_mirror_vma(vma);
68040+ if (vma_m)
68041+ vma_m->vm_flags |= VM_INSERTPAGE;
68042+#endif
68043+
68044 vma->vm_flags |= VM_INSERTPAGE;
68045 return insert_page(vma, addr, page, vma->vm_page_prot);
68046 }
68047@@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
68048 unsigned long pfn)
68049 {
68050 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68051+ BUG_ON(vma->vm_mirror);
68052
68053 if (addr < vma->vm_start || addr >= vma->vm_end)
68054 return -EFAULT;
68055@@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
68056 copy_user_highpage(dst, src, va, vma);
68057 }
68058
68059+#ifdef CONFIG_PAX_SEGMEXEC
68060+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68061+{
68062+ struct mm_struct *mm = vma->vm_mm;
68063+ spinlock_t *ptl;
68064+ pte_t *pte, entry;
68065+
68066+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68067+ entry = *pte;
68068+ if (!pte_present(entry)) {
68069+ if (!pte_none(entry)) {
68070+ BUG_ON(pte_file(entry));
68071+ free_swap_and_cache(pte_to_swp_entry(entry));
68072+ pte_clear_not_present_full(mm, address, pte, 0);
68073+ }
68074+ } else {
68075+ struct page *page;
68076+
68077+ flush_cache_page(vma, address, pte_pfn(entry));
68078+ entry = ptep_clear_flush(vma, address, pte);
68079+ BUG_ON(pte_dirty(entry));
68080+ page = vm_normal_page(vma, address, entry);
68081+ if (page) {
68082+ update_hiwater_rss(mm);
68083+ if (PageAnon(page))
68084+ dec_mm_counter_fast(mm, MM_ANONPAGES);
68085+ else
68086+ dec_mm_counter_fast(mm, MM_FILEPAGES);
68087+ page_remove_rmap(page);
68088+ page_cache_release(page);
68089+ }
68090+ }
68091+ pte_unmap_unlock(pte, ptl);
68092+}
68093+
68094+/* PaX: if vma is mirrored, synchronize the mirror's PTE
68095+ *
68096+ * the ptl of the lower mapped page is held on entry and is not released on exit
68097+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68098+ */
68099+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68100+{
68101+ struct mm_struct *mm = vma->vm_mm;
68102+ unsigned long address_m;
68103+ spinlock_t *ptl_m;
68104+ struct vm_area_struct *vma_m;
68105+ pmd_t *pmd_m;
68106+ pte_t *pte_m, entry_m;
68107+
68108+ BUG_ON(!page_m || !PageAnon(page_m));
68109+
68110+ vma_m = pax_find_mirror_vma(vma);
68111+ if (!vma_m)
68112+ return;
68113+
68114+ BUG_ON(!PageLocked(page_m));
68115+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68116+ address_m = address + SEGMEXEC_TASK_SIZE;
68117+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68118+ pte_m = pte_offset_map(pmd_m, address_m);
68119+ ptl_m = pte_lockptr(mm, pmd_m);
68120+ if (ptl != ptl_m) {
68121+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68122+ if (!pte_none(*pte_m))
68123+ goto out;
68124+ }
68125+
68126+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68127+ page_cache_get(page_m);
68128+ page_add_anon_rmap(page_m, vma_m, address_m);
68129+ inc_mm_counter_fast(mm, MM_ANONPAGES);
68130+ set_pte_at(mm, address_m, pte_m, entry_m);
68131+ update_mmu_cache(vma_m, address_m, entry_m);
68132+out:
68133+ if (ptl != ptl_m)
68134+ spin_unlock(ptl_m);
68135+ pte_unmap(pte_m);
68136+ unlock_page(page_m);
68137+}
68138+
68139+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68140+{
68141+ struct mm_struct *mm = vma->vm_mm;
68142+ unsigned long address_m;
68143+ spinlock_t *ptl_m;
68144+ struct vm_area_struct *vma_m;
68145+ pmd_t *pmd_m;
68146+ pte_t *pte_m, entry_m;
68147+
68148+ BUG_ON(!page_m || PageAnon(page_m));
68149+
68150+ vma_m = pax_find_mirror_vma(vma);
68151+ if (!vma_m)
68152+ return;
68153+
68154+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68155+ address_m = address + SEGMEXEC_TASK_SIZE;
68156+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68157+ pte_m = pte_offset_map(pmd_m, address_m);
68158+ ptl_m = pte_lockptr(mm, pmd_m);
68159+ if (ptl != ptl_m) {
68160+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68161+ if (!pte_none(*pte_m))
68162+ goto out;
68163+ }
68164+
68165+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68166+ page_cache_get(page_m);
68167+ page_add_file_rmap(page_m);
68168+ inc_mm_counter_fast(mm, MM_FILEPAGES);
68169+ set_pte_at(mm, address_m, pte_m, entry_m);
68170+ update_mmu_cache(vma_m, address_m, entry_m);
68171+out:
68172+ if (ptl != ptl_m)
68173+ spin_unlock(ptl_m);
68174+ pte_unmap(pte_m);
68175+}
68176+
68177+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68178+{
68179+ struct mm_struct *mm = vma->vm_mm;
68180+ unsigned long address_m;
68181+ spinlock_t *ptl_m;
68182+ struct vm_area_struct *vma_m;
68183+ pmd_t *pmd_m;
68184+ pte_t *pte_m, entry_m;
68185+
68186+ vma_m = pax_find_mirror_vma(vma);
68187+ if (!vma_m)
68188+ return;
68189+
68190+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68191+ address_m = address + SEGMEXEC_TASK_SIZE;
68192+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68193+ pte_m = pte_offset_map(pmd_m, address_m);
68194+ ptl_m = pte_lockptr(mm, pmd_m);
68195+ if (ptl != ptl_m) {
68196+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68197+ if (!pte_none(*pte_m))
68198+ goto out;
68199+ }
68200+
68201+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68202+ set_pte_at(mm, address_m, pte_m, entry_m);
68203+out:
68204+ if (ptl != ptl_m)
68205+ spin_unlock(ptl_m);
68206+ pte_unmap(pte_m);
68207+}
68208+
68209+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68210+{
68211+ struct page *page_m;
68212+ pte_t entry;
68213+
68214+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68215+ goto out;
68216+
68217+ entry = *pte;
68218+ page_m = vm_normal_page(vma, address, entry);
68219+ if (!page_m)
68220+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68221+ else if (PageAnon(page_m)) {
68222+ if (pax_find_mirror_vma(vma)) {
68223+ pte_unmap_unlock(pte, ptl);
68224+ lock_page(page_m);
68225+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68226+ if (pte_same(entry, *pte))
68227+ pax_mirror_anon_pte(vma, address, page_m, ptl);
68228+ else
68229+ unlock_page(page_m);
68230+ }
68231+ } else
68232+ pax_mirror_file_pte(vma, address, page_m, ptl);
68233+
68234+out:
68235+ pte_unmap_unlock(pte, ptl);
68236+}
68237+#endif
68238+
68239 /*
68240 * This routine handles present pages, when users try to write
68241 * to a shared page. It is done by copying the page to a new address
68242@@ -2656,6 +2849,12 @@ gotten:
68243 */
68244 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68245 if (likely(pte_same(*page_table, orig_pte))) {
68246+
68247+#ifdef CONFIG_PAX_SEGMEXEC
68248+ if (pax_find_mirror_vma(vma))
68249+ BUG_ON(!trylock_page(new_page));
68250+#endif
68251+
68252 if (old_page) {
68253 if (!PageAnon(old_page)) {
68254 dec_mm_counter_fast(mm, MM_FILEPAGES);
68255@@ -2707,6 +2906,10 @@ gotten:
68256 page_remove_rmap(old_page);
68257 }
68258
68259+#ifdef CONFIG_PAX_SEGMEXEC
68260+ pax_mirror_anon_pte(vma, address, new_page, ptl);
68261+#endif
68262+
68263 /* Free the old page.. */
68264 new_page = old_page;
68265 ret |= VM_FAULT_WRITE;
68266@@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68267 swap_free(entry);
68268 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68269 try_to_free_swap(page);
68270+
68271+#ifdef CONFIG_PAX_SEGMEXEC
68272+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68273+#endif
68274+
68275 unlock_page(page);
68276 if (swapcache) {
68277 /*
68278@@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
68279
68280 /* No need to invalidate - it was non-present before */
68281 update_mmu_cache(vma, address, page_table);
68282+
68283+#ifdef CONFIG_PAX_SEGMEXEC
68284+ pax_mirror_anon_pte(vma, address, page, ptl);
68285+#endif
68286+
68287 unlock:
68288 pte_unmap_unlock(page_table, ptl);
68289 out:
68290@@ -3028,40 +3241,6 @@ out_release:
68291 }
68292
68293 /*
68294- * This is like a special single-page "expand_{down|up}wards()",
68295- * except we must first make sure that 'address{-|+}PAGE_SIZE'
68296- * doesn't hit another vma.
68297- */
68298-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68299-{
68300- address &= PAGE_MASK;
68301- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68302- struct vm_area_struct *prev = vma->vm_prev;
68303-
68304- /*
68305- * Is there a mapping abutting this one below?
68306- *
68307- * That's only ok if it's the same stack mapping
68308- * that has gotten split..
68309- */
68310- if (prev && prev->vm_end == address)
68311- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68312-
68313- expand_downwards(vma, address - PAGE_SIZE);
68314- }
68315- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68316- struct vm_area_struct *next = vma->vm_next;
68317-
68318- /* As VM_GROWSDOWN but s/below/above/ */
68319- if (next && next->vm_start == address + PAGE_SIZE)
68320- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68321-
68322- expand_upwards(vma, address + PAGE_SIZE);
68323- }
68324- return 0;
68325-}
68326-
68327-/*
68328 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68329 * but allow concurrent faults), and pte mapped but not yet locked.
68330 * We return with mmap_sem still held, but pte unmapped and unlocked.
68331@@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68332 unsigned long address, pte_t *page_table, pmd_t *pmd,
68333 unsigned int flags)
68334 {
68335- struct page *page;
68336+ struct page *page = NULL;
68337 spinlock_t *ptl;
68338 pte_t entry;
68339
68340- pte_unmap(page_table);
68341-
68342- /* Check if we need to add a guard page to the stack */
68343- if (check_stack_guard_page(vma, address) < 0)
68344- return VM_FAULT_SIGBUS;
68345-
68346- /* Use the zero-page for reads */
68347 if (!(flags & FAULT_FLAG_WRITE)) {
68348 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68349 vma->vm_page_prot));
68350- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68351+ ptl = pte_lockptr(mm, pmd);
68352+ spin_lock(ptl);
68353 if (!pte_none(*page_table))
68354 goto unlock;
68355 goto setpte;
68356 }
68357
68358 /* Allocate our own private page. */
68359+ pte_unmap(page_table);
68360+
68361 if (unlikely(anon_vma_prepare(vma)))
68362 goto oom;
68363 page = alloc_zeroed_user_highpage_movable(vma, address);
68364@@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
68365 if (!pte_none(*page_table))
68366 goto release;
68367
68368+#ifdef CONFIG_PAX_SEGMEXEC
68369+ if (pax_find_mirror_vma(vma))
68370+ BUG_ON(!trylock_page(page));
68371+#endif
68372+
68373 inc_mm_counter_fast(mm, MM_ANONPAGES);
68374 page_add_new_anon_rmap(page, vma, address);
68375 setpte:
68376@@ -3116,6 +3296,12 @@ setpte:
68377
68378 /* No need to invalidate - it was non-present before */
68379 update_mmu_cache(vma, address, page_table);
68380+
68381+#ifdef CONFIG_PAX_SEGMEXEC
68382+ if (page)
68383+ pax_mirror_anon_pte(vma, address, page, ptl);
68384+#endif
68385+
68386 unlock:
68387 pte_unmap_unlock(page_table, ptl);
68388 return 0;
68389@@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68390 */
68391 /* Only go through if we didn't race with anybody else... */
68392 if (likely(pte_same(*page_table, orig_pte))) {
68393+
68394+#ifdef CONFIG_PAX_SEGMEXEC
68395+ if (anon && pax_find_mirror_vma(vma))
68396+ BUG_ON(!trylock_page(page));
68397+#endif
68398+
68399 flush_icache_page(vma, page);
68400 entry = mk_pte(page, vma->vm_page_prot);
68401 if (flags & FAULT_FLAG_WRITE)
68402@@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68403
68404 /* no need to invalidate: a not-present page won't be cached */
68405 update_mmu_cache(vma, address, page_table);
68406+
68407+#ifdef CONFIG_PAX_SEGMEXEC
68408+ if (anon)
68409+ pax_mirror_anon_pte(vma, address, page, ptl);
68410+ else
68411+ pax_mirror_file_pte(vma, address, page, ptl);
68412+#endif
68413+
68414 } else {
68415 if (cow_page)
68416 mem_cgroup_uncharge_page(cow_page);
68417@@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
68418 if (flags & FAULT_FLAG_WRITE)
68419 flush_tlb_fix_spurious_fault(vma, address);
68420 }
68421+
68422+#ifdef CONFIG_PAX_SEGMEXEC
68423+ pax_mirror_pte(vma, address, pte, pmd, ptl);
68424+ return 0;
68425+#endif
68426+
68427 unlock:
68428 pte_unmap_unlock(pte, ptl);
68429 return 0;
68430@@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68431 pmd_t *pmd;
68432 pte_t *pte;
68433
68434+#ifdef CONFIG_PAX_SEGMEXEC
68435+ struct vm_area_struct *vma_m;
68436+#endif
68437+
68438 __set_current_state(TASK_RUNNING);
68439
68440 count_vm_event(PGFAULT);
68441@@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68442 if (unlikely(is_vm_hugetlb_page(vma)))
68443 return hugetlb_fault(mm, vma, address, flags);
68444
68445+#ifdef CONFIG_PAX_SEGMEXEC
68446+ vma_m = pax_find_mirror_vma(vma);
68447+ if (vma_m) {
68448+ unsigned long address_m;
68449+ pgd_t *pgd_m;
68450+ pud_t *pud_m;
68451+ pmd_t *pmd_m;
68452+
68453+ if (vma->vm_start > vma_m->vm_start) {
68454+ address_m = address;
68455+ address -= SEGMEXEC_TASK_SIZE;
68456+ vma = vma_m;
68457+ } else
68458+ address_m = address + SEGMEXEC_TASK_SIZE;
68459+
68460+ pgd_m = pgd_offset(mm, address_m);
68461+ pud_m = pud_alloc(mm, pgd_m, address_m);
68462+ if (!pud_m)
68463+ return VM_FAULT_OOM;
68464+ pmd_m = pmd_alloc(mm, pud_m, address_m);
68465+ if (!pmd_m)
68466+ return VM_FAULT_OOM;
68467+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
68468+ return VM_FAULT_OOM;
68469+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68470+ }
68471+#endif
68472+
68473 pgd = pgd_offset(mm, address);
68474 pud = pud_alloc(mm, pgd, address);
68475 if (!pud)
68476@@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
68477 * run pte_offset_map on the pmd, if an huge pmd could
68478 * materialize from under us from a different thread.
68479 */
68480- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
68481+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
68482 return VM_FAULT_OOM;
68483 /* if an huge pmd materialized from under us just retry later */
68484 if (unlikely(pmd_trans_huge(*pmd)))
68485@@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
68486 gate_vma.vm_start = FIXADDR_USER_START;
68487 gate_vma.vm_end = FIXADDR_USER_END;
68488 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68489- gate_vma.vm_page_prot = __P101;
68490+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68491 /*
68492 * Make sure the vDSO gets into every core dump.
68493 * Dumping its contents makes post-mortem fully interpretable later
68494diff --git a/mm/mempolicy.c b/mm/mempolicy.c
68495index c3fdbcb..2e8ef90 100644
68496--- a/mm/mempolicy.c
68497+++ b/mm/mempolicy.c
68498@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68499 unsigned long vmstart;
68500 unsigned long vmend;
68501
68502+#ifdef CONFIG_PAX_SEGMEXEC
68503+ struct vm_area_struct *vma_m;
68504+#endif
68505+
68506 vma = find_vma_prev(mm, start, &prev);
68507 if (!vma || vma->vm_start > start)
68508 return -EFAULT;
68509@@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
68510 err = policy_vma(vma, new_pol);
68511 if (err)
68512 goto out;
68513+
68514+#ifdef CONFIG_PAX_SEGMEXEC
68515+ vma_m = pax_find_mirror_vma(vma);
68516+ if (vma_m) {
68517+ err = policy_vma(vma_m, new_pol);
68518+ if (err)
68519+ goto out;
68520+ }
68521+#endif
68522+
68523 }
68524
68525 out:
68526@@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
68527
68528 if (end < start)
68529 return -EINVAL;
68530+
68531+#ifdef CONFIG_PAX_SEGMEXEC
68532+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68533+ if (end > SEGMEXEC_TASK_SIZE)
68534+ return -EINVAL;
68535+ } else
68536+#endif
68537+
68538+ if (end > TASK_SIZE)
68539+ return -EINVAL;
68540+
68541 if (end == start)
68542 return 0;
68543
68544@@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68545 if (!mm)
68546 goto out;
68547
68548+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68549+ if (mm != current->mm &&
68550+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68551+ err = -EPERM;
68552+ goto out;
68553+ }
68554+#endif
68555+
68556 /*
68557 * Check if this process has the right to modify the specified
68558 * process. The right exists if the process has administrative
68559@@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
68560 rcu_read_lock();
68561 tcred = __task_cred(task);
68562 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68563- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68564- !capable(CAP_SYS_NICE)) {
68565+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68566 rcu_read_unlock();
68567 err = -EPERM;
68568 goto out;
68569diff --git a/mm/migrate.c b/mm/migrate.c
68570index 177aca4..ab3a744 100644
68571--- a/mm/migrate.c
68572+++ b/mm/migrate.c
68573@@ -1313,6 +1313,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68574 if (!mm)
68575 return -EINVAL;
68576
68577+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68578+ if (mm != current->mm &&
68579+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68580+ err = -EPERM;
68581+ goto out;
68582+ }
68583+#endif
68584+
68585 /*
68586 * Check if this process has the right to modify the specified
68587 * process. The right exists if the process has administrative
68588@@ -1322,8 +1330,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
68589 rcu_read_lock();
68590 tcred = __task_cred(task);
68591 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68592- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68593- !capable(CAP_SYS_NICE)) {
68594+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68595 rcu_read_unlock();
68596 err = -EPERM;
68597 goto out;
68598diff --git a/mm/mlock.c b/mm/mlock.c
68599index 4f4f53b..9511904 100644
68600--- a/mm/mlock.c
68601+++ b/mm/mlock.c
68602@@ -13,6 +13,7 @@
68603 #include <linux/pagemap.h>
68604 #include <linux/mempolicy.h>
68605 #include <linux/syscalls.h>
68606+#include <linux/security.h>
68607 #include <linux/sched.h>
68608 #include <linux/export.h>
68609 #include <linux/rmap.h>
68610@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
68611 return -EINVAL;
68612 if (end == start)
68613 return 0;
68614+ if (end > TASK_SIZE)
68615+ return -EINVAL;
68616+
68617 vma = find_vma_prev(current->mm, start, &prev);
68618 if (!vma || vma->vm_start > start)
68619 return -ENOMEM;
68620@@ -395,6 +399,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
68621 for (nstart = start ; ; ) {
68622 vm_flags_t newflags;
68623
68624+#ifdef CONFIG_PAX_SEGMEXEC
68625+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68626+ break;
68627+#endif
68628+
68629 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68630
68631 newflags = vma->vm_flags | VM_LOCKED;
68632@@ -500,6 +509,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
68633 lock_limit >>= PAGE_SHIFT;
68634
68635 /* check against resource limits */
68636+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68637 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68638 error = do_mlock(start, len, 1);
68639 up_write(&current->mm->mmap_sem);
68640@@ -523,17 +533,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
68641 static int do_mlockall(int flags)
68642 {
68643 struct vm_area_struct * vma, * prev = NULL;
68644- unsigned int def_flags = 0;
68645
68646 if (flags & MCL_FUTURE)
68647- def_flags = VM_LOCKED;
68648- current->mm->def_flags = def_flags;
68649+ current->mm->def_flags |= VM_LOCKED;
68650+ else
68651+ current->mm->def_flags &= ~VM_LOCKED;
68652 if (flags == MCL_FUTURE)
68653 goto out;
68654
68655 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68656 vm_flags_t newflags;
68657
68658+#ifdef CONFIG_PAX_SEGMEXEC
68659+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68660+ break;
68661+#endif
68662+
68663+ BUG_ON(vma->vm_end > TASK_SIZE);
68664 newflags = vma->vm_flags | VM_LOCKED;
68665 if (!(flags & MCL_CURRENT))
68666 newflags &= ~VM_LOCKED;
68667@@ -566,6 +582,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68668 lock_limit >>= PAGE_SHIFT;
68669
68670 ret = -ENOMEM;
68671+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68672 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68673 capable(CAP_IPC_LOCK))
68674 ret = do_mlockall(flags);
68675diff --git a/mm/mmap.c b/mm/mmap.c
68676index eae90af..51ca80b 100644
68677--- a/mm/mmap.c
68678+++ b/mm/mmap.c
68679@@ -46,6 +46,16 @@
68680 #define arch_rebalance_pgtables(addr, len) (addr)
68681 #endif
68682
68683+static inline void verify_mm_writelocked(struct mm_struct *mm)
68684+{
68685+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68686+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68687+ up_read(&mm->mmap_sem);
68688+ BUG();
68689+ }
68690+#endif
68691+}
68692+
68693 static void unmap_region(struct mm_struct *mm,
68694 struct vm_area_struct *vma, struct vm_area_struct *prev,
68695 unsigned long start, unsigned long end);
68696@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
68697 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68698 *
68699 */
68700-pgprot_t protection_map[16] = {
68701+pgprot_t protection_map[16] __read_only = {
68702 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68703 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68704 };
68705
68706-pgprot_t vm_get_page_prot(unsigned long vm_flags)
68707+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68708 {
68709- return __pgprot(pgprot_val(protection_map[vm_flags &
68710+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68711 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68712 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68713+
68714+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68715+ if (!(__supported_pte_mask & _PAGE_NX) &&
68716+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68717+ (vm_flags & (VM_READ | VM_WRITE)))
68718+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68719+#endif
68720+
68721+ return prot;
68722 }
68723 EXPORT_SYMBOL(vm_get_page_prot);
68724
68725 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
68726 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
68727 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68728+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68729 /*
68730 * Make sure vm_committed_as in one cacheline and not cacheline shared with
68731 * other variables. It can be updated by several CPUs frequently.
68732@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
68733 struct vm_area_struct *next = vma->vm_next;
68734
68735 might_sleep();
68736+ BUG_ON(vma->vm_mirror);
68737 if (vma->vm_ops && vma->vm_ops->close)
68738 vma->vm_ops->close(vma);
68739 if (vma->vm_file) {
68740@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68741 * not page aligned -Ram Gupta
68742 */
68743 rlim = rlimit(RLIMIT_DATA);
68744+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68745 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68746 (mm->end_data - mm->start_data) > rlim)
68747 goto out;
68748@@ -689,6 +711,12 @@ static int
68749 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68750 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68751 {
68752+
68753+#ifdef CONFIG_PAX_SEGMEXEC
68754+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68755+ return 0;
68756+#endif
68757+
68758 if (is_mergeable_vma(vma, file, vm_flags) &&
68759 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68760 if (vma->vm_pgoff == vm_pgoff)
68761@@ -708,6 +736,12 @@ static int
68762 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68763 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68764 {
68765+
68766+#ifdef CONFIG_PAX_SEGMEXEC
68767+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68768+ return 0;
68769+#endif
68770+
68771 if (is_mergeable_vma(vma, file, vm_flags) &&
68772 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
68773 pgoff_t vm_pglen;
68774@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68775 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68776 struct vm_area_struct *prev, unsigned long addr,
68777 unsigned long end, unsigned long vm_flags,
68778- struct anon_vma *anon_vma, struct file *file,
68779+ struct anon_vma *anon_vma, struct file *file,
68780 pgoff_t pgoff, struct mempolicy *policy)
68781 {
68782 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68783 struct vm_area_struct *area, *next;
68784 int err;
68785
68786+#ifdef CONFIG_PAX_SEGMEXEC
68787+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68788+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68789+
68790+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68791+#endif
68792+
68793 /*
68794 * We later require that vma->vm_flags == vm_flags,
68795 * so this tests vma->vm_flags & VM_SPECIAL, too.
68796@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68797 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68798 next = next->vm_next;
68799
68800+#ifdef CONFIG_PAX_SEGMEXEC
68801+ if (prev)
68802+ prev_m = pax_find_mirror_vma(prev);
68803+ if (area)
68804+ area_m = pax_find_mirror_vma(area);
68805+ if (next)
68806+ next_m = pax_find_mirror_vma(next);
68807+#endif
68808+
68809 /*
68810 * Can it merge with the predecessor?
68811 */
68812@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68813 /* cases 1, 6 */
68814 err = vma_adjust(prev, prev->vm_start,
68815 next->vm_end, prev->vm_pgoff, NULL);
68816- } else /* cases 2, 5, 7 */
68817+
68818+#ifdef CONFIG_PAX_SEGMEXEC
68819+ if (!err && prev_m)
68820+ err = vma_adjust(prev_m, prev_m->vm_start,
68821+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68822+#endif
68823+
68824+ } else { /* cases 2, 5, 7 */
68825 err = vma_adjust(prev, prev->vm_start,
68826 end, prev->vm_pgoff, NULL);
68827+
68828+#ifdef CONFIG_PAX_SEGMEXEC
68829+ if (!err && prev_m)
68830+ err = vma_adjust(prev_m, prev_m->vm_start,
68831+ end_m, prev_m->vm_pgoff, NULL);
68832+#endif
68833+
68834+ }
68835 if (err)
68836 return NULL;
68837 khugepaged_enter_vma_merge(prev);
68838@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
68839 mpol_equal(policy, vma_policy(next)) &&
68840 can_vma_merge_before(next, vm_flags,
68841 anon_vma, file, pgoff+pglen)) {
68842- if (prev && addr < prev->vm_end) /* case 4 */
68843+ if (prev && addr < prev->vm_end) { /* case 4 */
68844 err = vma_adjust(prev, prev->vm_start,
68845 addr, prev->vm_pgoff, NULL);
68846- else /* cases 3, 8 */
68847+
68848+#ifdef CONFIG_PAX_SEGMEXEC
68849+ if (!err && prev_m)
68850+ err = vma_adjust(prev_m, prev_m->vm_start,
68851+ addr_m, prev_m->vm_pgoff, NULL);
68852+#endif
68853+
68854+ } else { /* cases 3, 8 */
68855 err = vma_adjust(area, addr, next->vm_end,
68856 next->vm_pgoff - pglen, NULL);
68857+
68858+#ifdef CONFIG_PAX_SEGMEXEC
68859+ if (!err && area_m)
68860+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
68861+ next_m->vm_pgoff - pglen, NULL);
68862+#endif
68863+
68864+ }
68865 if (err)
68866 return NULL;
68867 khugepaged_enter_vma_merge(area);
68868@@ -921,14 +1001,11 @@ none:
68869 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68870 struct file *file, long pages)
68871 {
68872- const unsigned long stack_flags
68873- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68874-
68875 if (file) {
68876 mm->shared_vm += pages;
68877 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68878 mm->exec_vm += pages;
68879- } else if (flags & stack_flags)
68880+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68881 mm->stack_vm += pages;
68882 if (flags & (VM_RESERVED|VM_IO))
68883 mm->reserved_vm += pages;
68884@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68885 * (the exception is when the underlying filesystem is noexec
68886 * mounted, in which case we dont add PROT_EXEC.)
68887 */
68888- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68889+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68890 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68891 prot |= PROT_EXEC;
68892
68893@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68894 /* Obtain the address to map to. we verify (or select) it and ensure
68895 * that it represents a valid section of the address space.
68896 */
68897- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68898+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68899 if (addr & ~PAGE_MASK)
68900 return addr;
68901
68902@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68903 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68904 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68905
68906+#ifdef CONFIG_PAX_MPROTECT
68907+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68908+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68909+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68910+ gr_log_rwxmmap(file);
68911+
68912+#ifdef CONFIG_PAX_EMUPLT
68913+ vm_flags &= ~VM_EXEC;
68914+#else
68915+ return -EPERM;
68916+#endif
68917+
68918+ }
68919+
68920+ if (!(vm_flags & VM_EXEC))
68921+ vm_flags &= ~VM_MAYEXEC;
68922+#else
68923+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68924+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68925+#endif
68926+ else
68927+ vm_flags &= ~VM_MAYWRITE;
68928+ }
68929+#endif
68930+
68931+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68932+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68933+ vm_flags &= ~VM_PAGEEXEC;
68934+#endif
68935+
68936 if (flags & MAP_LOCKED)
68937 if (!can_do_mlock())
68938 return -EPERM;
68939@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68940 locked += mm->locked_vm;
68941 lock_limit = rlimit(RLIMIT_MEMLOCK);
68942 lock_limit >>= PAGE_SHIFT;
68943+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68944 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68945 return -EAGAIN;
68946 }
68947@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
68948 if (error)
68949 return error;
68950
68951+ if (!gr_acl_handle_mmap(file, prot))
68952+ return -EACCES;
68953+
68954 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
68955 }
68956 EXPORT_SYMBOL(do_mmap_pgoff);
68957@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
68958 vm_flags_t vm_flags = vma->vm_flags;
68959
68960 /* If it was private or non-writable, the write bit is already clear */
68961- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
68962+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
68963 return 0;
68964
68965 /* The backer wishes to know when pages are first written to? */
68966@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
68967 unsigned long charged = 0;
68968 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
68969
68970+#ifdef CONFIG_PAX_SEGMEXEC
68971+ struct vm_area_struct *vma_m = NULL;
68972+#endif
68973+
68974+ /*
68975+ * mm->mmap_sem is required to protect against another thread
68976+ * changing the mappings in case we sleep.
68977+ */
68978+ verify_mm_writelocked(mm);
68979+
68980 /* Clear old maps */
68981 error = -ENOMEM;
68982-munmap_back:
68983 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68984 if (vma && vma->vm_start < addr + len) {
68985 if (do_munmap(mm, addr, len))
68986 return -ENOMEM;
68987- goto munmap_back;
68988+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
68989+ BUG_ON(vma && vma->vm_start < addr + len);
68990 }
68991
68992 /* Check against address space limit. */
68993@@ -1258,6 +1379,16 @@ munmap_back:
68994 goto unacct_error;
68995 }
68996
68997+#ifdef CONFIG_PAX_SEGMEXEC
68998+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
68999+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69000+ if (!vma_m) {
69001+ error = -ENOMEM;
69002+ goto free_vma;
69003+ }
69004+ }
69005+#endif
69006+
69007 vma->vm_mm = mm;
69008 vma->vm_start = addr;
69009 vma->vm_end = addr + len;
69010@@ -1281,6 +1412,19 @@ munmap_back:
69011 error = file->f_op->mmap(file, vma);
69012 if (error)
69013 goto unmap_and_free_vma;
69014+
69015+#ifdef CONFIG_PAX_SEGMEXEC
69016+ if (vma_m && (vm_flags & VM_EXECUTABLE))
69017+ added_exe_file_vma(mm);
69018+#endif
69019+
69020+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69021+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69022+ vma->vm_flags |= VM_PAGEEXEC;
69023+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69024+ }
69025+#endif
69026+
69027 if (vm_flags & VM_EXECUTABLE)
69028 added_exe_file_vma(mm);
69029
69030@@ -1316,6 +1460,11 @@ munmap_back:
69031 vma_link(mm, vma, prev, rb_link, rb_parent);
69032 file = vma->vm_file;
69033
69034+#ifdef CONFIG_PAX_SEGMEXEC
69035+ if (vma_m)
69036+ BUG_ON(pax_mirror_vma(vma_m, vma));
69037+#endif
69038+
69039 /* Once vma denies write, undo our temporary denial count */
69040 if (correct_wcount)
69041 atomic_inc(&inode->i_writecount);
69042@@ -1324,6 +1473,7 @@ out:
69043
69044 mm->total_vm += len >> PAGE_SHIFT;
69045 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69046+ track_exec_limit(mm, addr, addr + len, vm_flags);
69047 if (vm_flags & VM_LOCKED) {
69048 if (!mlock_vma_pages_range(vma, addr, addr + len))
69049 mm->locked_vm += (len >> PAGE_SHIFT);
69050@@ -1341,6 +1491,12 @@ unmap_and_free_vma:
69051 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69052 charged = 0;
69053 free_vma:
69054+
69055+#ifdef CONFIG_PAX_SEGMEXEC
69056+ if (vma_m)
69057+ kmem_cache_free(vm_area_cachep, vma_m);
69058+#endif
69059+
69060 kmem_cache_free(vm_area_cachep, vma);
69061 unacct_error:
69062 if (charged)
69063@@ -1348,6 +1504,44 @@ unacct_error:
69064 return error;
69065 }
69066
69067+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69068+{
69069+ if (!vma) {
69070+#ifdef CONFIG_STACK_GROWSUP
69071+ if (addr > sysctl_heap_stack_gap)
69072+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69073+ else
69074+ vma = find_vma(current->mm, 0);
69075+ if (vma && (vma->vm_flags & VM_GROWSUP))
69076+ return false;
69077+#endif
69078+ return true;
69079+ }
69080+
69081+ if (addr + len > vma->vm_start)
69082+ return false;
69083+
69084+ if (vma->vm_flags & VM_GROWSDOWN)
69085+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69086+#ifdef CONFIG_STACK_GROWSUP
69087+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69088+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69089+#endif
69090+
69091+ return true;
69092+}
69093+
69094+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69095+{
69096+ if (vma->vm_start < len)
69097+ return -ENOMEM;
69098+ if (!(vma->vm_flags & VM_GROWSDOWN))
69099+ return vma->vm_start - len;
69100+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
69101+ return vma->vm_start - len - sysctl_heap_stack_gap;
69102+ return -ENOMEM;
69103+}
69104+
69105 /* Get an address range which is currently unmapped.
69106 * For shmat() with addr=0.
69107 *
69108@@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
69109 if (flags & MAP_FIXED)
69110 return addr;
69111
69112+#ifdef CONFIG_PAX_RANDMMAP
69113+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69114+#endif
69115+
69116 if (addr) {
69117 addr = PAGE_ALIGN(addr);
69118- vma = find_vma(mm, addr);
69119- if (TASK_SIZE - len >= addr &&
69120- (!vma || addr + len <= vma->vm_start))
69121- return addr;
69122+ if (TASK_SIZE - len >= addr) {
69123+ vma = find_vma(mm, addr);
69124+ if (check_heap_stack_gap(vma, addr, len))
69125+ return addr;
69126+ }
69127 }
69128 if (len > mm->cached_hole_size) {
69129- start_addr = addr = mm->free_area_cache;
69130+ start_addr = addr = mm->free_area_cache;
69131 } else {
69132- start_addr = addr = TASK_UNMAPPED_BASE;
69133- mm->cached_hole_size = 0;
69134+ start_addr = addr = mm->mmap_base;
69135+ mm->cached_hole_size = 0;
69136 }
69137
69138 full_search:
69139@@ -1396,34 +1595,40 @@ full_search:
69140 * Start a new search - just in case we missed
69141 * some holes.
69142 */
69143- if (start_addr != TASK_UNMAPPED_BASE) {
69144- addr = TASK_UNMAPPED_BASE;
69145- start_addr = addr;
69146+ if (start_addr != mm->mmap_base) {
69147+ start_addr = addr = mm->mmap_base;
69148 mm->cached_hole_size = 0;
69149 goto full_search;
69150 }
69151 return -ENOMEM;
69152 }
69153- if (!vma || addr + len <= vma->vm_start) {
69154- /*
69155- * Remember the place where we stopped the search:
69156- */
69157- mm->free_area_cache = addr + len;
69158- return addr;
69159- }
69160+ if (check_heap_stack_gap(vma, addr, len))
69161+ break;
69162 if (addr + mm->cached_hole_size < vma->vm_start)
69163 mm->cached_hole_size = vma->vm_start - addr;
69164 addr = vma->vm_end;
69165 }
69166+
69167+ /*
69168+ * Remember the place where we stopped the search:
69169+ */
69170+ mm->free_area_cache = addr + len;
69171+ return addr;
69172 }
69173 #endif
69174
69175 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69176 {
69177+
69178+#ifdef CONFIG_PAX_SEGMEXEC
69179+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69180+ return;
69181+#endif
69182+
69183 /*
69184 * Is this a new hole at the lowest possible address?
69185 */
69186- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69187+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69188 mm->free_area_cache = addr;
69189 mm->cached_hole_size = ~0UL;
69190 }
69191@@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69192 {
69193 struct vm_area_struct *vma;
69194 struct mm_struct *mm = current->mm;
69195- unsigned long addr = addr0;
69196+ unsigned long base = mm->mmap_base, addr = addr0;
69197
69198 /* requested length too big for entire address space */
69199 if (len > TASK_SIZE)
69200@@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69201 if (flags & MAP_FIXED)
69202 return addr;
69203
69204+#ifdef CONFIG_PAX_RANDMMAP
69205+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69206+#endif
69207+
69208 /* requesting a specific address */
69209 if (addr) {
69210 addr = PAGE_ALIGN(addr);
69211- vma = find_vma(mm, addr);
69212- if (TASK_SIZE - len >= addr &&
69213- (!vma || addr + len <= vma->vm_start))
69214- return addr;
69215+ if (TASK_SIZE - len >= addr) {
69216+ vma = find_vma(mm, addr);
69217+ if (check_heap_stack_gap(vma, addr, len))
69218+ return addr;
69219+ }
69220 }
69221
69222 /* check if free_area_cache is useful for us */
69223@@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69224 /* make sure it can fit in the remaining address space */
69225 if (addr > len) {
69226 vma = find_vma(mm, addr-len);
69227- if (!vma || addr <= vma->vm_start)
69228+ if (check_heap_stack_gap(vma, addr - len, len))
69229 /* remember the address as a hint for next time */
69230 return (mm->free_area_cache = addr-len);
69231 }
69232@@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69233 * return with success:
69234 */
69235 vma = find_vma(mm, addr);
69236- if (!vma || addr+len <= vma->vm_start)
69237+ if (check_heap_stack_gap(vma, addr, len))
69238 /* remember the address as a hint for next time */
69239 return (mm->free_area_cache = addr);
69240
69241@@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
69242 mm->cached_hole_size = vma->vm_start - addr;
69243
69244 /* try just below the current vma->vm_start */
69245- addr = vma->vm_start-len;
69246- } while (len < vma->vm_start);
69247+ addr = skip_heap_stack_gap(vma, len);
69248+ } while (!IS_ERR_VALUE(addr));
69249
69250 bottomup:
69251 /*
69252@@ -1507,13 +1717,21 @@ bottomup:
69253 * can happen with large stack limits and large mmap()
69254 * allocations.
69255 */
69256+ mm->mmap_base = TASK_UNMAPPED_BASE;
69257+
69258+#ifdef CONFIG_PAX_RANDMMAP
69259+ if (mm->pax_flags & MF_PAX_RANDMMAP)
69260+ mm->mmap_base += mm->delta_mmap;
69261+#endif
69262+
69263+ mm->free_area_cache = mm->mmap_base;
69264 mm->cached_hole_size = ~0UL;
69265- mm->free_area_cache = TASK_UNMAPPED_BASE;
69266 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69267 /*
69268 * Restore the topdown base:
69269 */
69270- mm->free_area_cache = mm->mmap_base;
69271+ mm->mmap_base = base;
69272+ mm->free_area_cache = base;
69273 mm->cached_hole_size = ~0UL;
69274
69275 return addr;
69276@@ -1522,6 +1740,12 @@ bottomup:
69277
69278 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69279 {
69280+
69281+#ifdef CONFIG_PAX_SEGMEXEC
69282+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69283+ return;
69284+#endif
69285+
69286 /*
69287 * Is this a new hole at the highest possible address?
69288 */
69289@@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69290 mm->free_area_cache = addr;
69291
69292 /* dont allow allocations above current base */
69293- if (mm->free_area_cache > mm->mmap_base)
69294+ if (mm->free_area_cache > mm->mmap_base) {
69295 mm->free_area_cache = mm->mmap_base;
69296+ mm->cached_hole_size = ~0UL;
69297+ }
69298 }
69299
69300 unsigned long
69301@@ -1638,6 +1864,28 @@ out:
69302 return prev ? prev->vm_next : vma;
69303 }
69304
69305+#ifdef CONFIG_PAX_SEGMEXEC
69306+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69307+{
69308+ struct vm_area_struct *vma_m;
69309+
69310+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69311+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69312+ BUG_ON(vma->vm_mirror);
69313+ return NULL;
69314+ }
69315+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69316+ vma_m = vma->vm_mirror;
69317+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69318+ BUG_ON(vma->vm_file != vma_m->vm_file);
69319+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69320+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
69321+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
69322+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69323+ return vma_m;
69324+}
69325+#endif
69326+
69327 /*
69328 * Verify that the stack growth is acceptable and
69329 * update accounting. This is shared with both the
69330@@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69331 return -ENOMEM;
69332
69333 /* Stack limit test */
69334+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69335 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
69336 return -ENOMEM;
69337
69338@@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69339 locked = mm->locked_vm + grow;
69340 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
69341 limit >>= PAGE_SHIFT;
69342+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69343 if (locked > limit && !capable(CAP_IPC_LOCK))
69344 return -ENOMEM;
69345 }
69346@@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
69347 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69348 * vma is the last one with address > vma->vm_end. Have to extend vma.
69349 */
69350+#ifndef CONFIG_IA64
69351+static
69352+#endif
69353 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69354 {
69355 int error;
69356+ bool locknext;
69357
69358 if (!(vma->vm_flags & VM_GROWSUP))
69359 return -EFAULT;
69360
69361+ /* Also guard against wrapping around to address 0. */
69362+ if (address < PAGE_ALIGN(address+1))
69363+ address = PAGE_ALIGN(address+1);
69364+ else
69365+ return -ENOMEM;
69366+
69367 /*
69368 * We must make sure the anon_vma is allocated
69369 * so that the anon_vma locking is not a noop.
69370 */
69371 if (unlikely(anon_vma_prepare(vma)))
69372 return -ENOMEM;
69373+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69374+ if (locknext && anon_vma_prepare(vma->vm_next))
69375+ return -ENOMEM;
69376 vma_lock_anon_vma(vma);
69377+ if (locknext)
69378+ vma_lock_anon_vma(vma->vm_next);
69379
69380 /*
69381 * vma->vm_start/vm_end cannot change under us because the caller
69382 * is required to hold the mmap_sem in read mode. We need the
69383- * anon_vma lock to serialize against concurrent expand_stacks.
69384- * Also guard against wrapping around to address 0.
69385+ * anon_vma locks to serialize against concurrent expand_stacks
69386+ * and expand_upwards.
69387 */
69388- if (address < PAGE_ALIGN(address+4))
69389- address = PAGE_ALIGN(address+4);
69390- else {
69391- vma_unlock_anon_vma(vma);
69392- return -ENOMEM;
69393- }
69394 error = 0;
69395
69396 /* Somebody else might have raced and expanded it already */
69397- if (address > vma->vm_end) {
69398+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69399+ error = -ENOMEM;
69400+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69401 unsigned long size, grow;
69402
69403 size = address - vma->vm_start;
69404@@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69405 }
69406 }
69407 }
69408+ if (locknext)
69409+ vma_unlock_anon_vma(vma->vm_next);
69410 vma_unlock_anon_vma(vma);
69411 khugepaged_enter_vma_merge(vma);
69412 return error;
69413@@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
69414 unsigned long address)
69415 {
69416 int error;
69417+ bool lockprev = false;
69418+ struct vm_area_struct *prev;
69419
69420 /*
69421 * We must make sure the anon_vma is allocated
69422@@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
69423 if (error)
69424 return error;
69425
69426+ prev = vma->vm_prev;
69427+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69428+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69429+#endif
69430+ if (lockprev && anon_vma_prepare(prev))
69431+ return -ENOMEM;
69432+ if (lockprev)
69433+ vma_lock_anon_vma(prev);
69434+
69435 vma_lock_anon_vma(vma);
69436
69437 /*
69438@@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
69439 */
69440
69441 /* Somebody else might have raced and expanded it already */
69442- if (address < vma->vm_start) {
69443+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69444+ error = -ENOMEM;
69445+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69446 unsigned long size, grow;
69447
69448+#ifdef CONFIG_PAX_SEGMEXEC
69449+ struct vm_area_struct *vma_m;
69450+
69451+ vma_m = pax_find_mirror_vma(vma);
69452+#endif
69453+
69454 size = vma->vm_end - address;
69455 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69456
69457@@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
69458 if (!error) {
69459 vma->vm_start = address;
69460 vma->vm_pgoff -= grow;
69461+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69462+
69463+#ifdef CONFIG_PAX_SEGMEXEC
69464+ if (vma_m) {
69465+ vma_m->vm_start -= grow << PAGE_SHIFT;
69466+ vma_m->vm_pgoff -= grow;
69467+ }
69468+#endif
69469+
69470 perf_event_mmap(vma);
69471 }
69472 }
69473 }
69474 vma_unlock_anon_vma(vma);
69475+ if (lockprev)
69476+ vma_unlock_anon_vma(prev);
69477 khugepaged_enter_vma_merge(vma);
69478 return error;
69479 }
69480@@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
69481 do {
69482 long nrpages = vma_pages(vma);
69483
69484+#ifdef CONFIG_PAX_SEGMEXEC
69485+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69486+ vma = remove_vma(vma);
69487+ continue;
69488+ }
69489+#endif
69490+
69491 mm->total_vm -= nrpages;
69492 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69493 vma = remove_vma(vma);
69494@@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
69495 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69496 vma->vm_prev = NULL;
69497 do {
69498+
69499+#ifdef CONFIG_PAX_SEGMEXEC
69500+ if (vma->vm_mirror) {
69501+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69502+ vma->vm_mirror->vm_mirror = NULL;
69503+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69504+ vma->vm_mirror = NULL;
69505+ }
69506+#endif
69507+
69508 rb_erase(&vma->vm_rb, &mm->mm_rb);
69509 mm->map_count--;
69510 tail_vma = vma;
69511@@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69512 struct vm_area_struct *new;
69513 int err = -ENOMEM;
69514
69515+#ifdef CONFIG_PAX_SEGMEXEC
69516+ struct vm_area_struct *vma_m, *new_m = NULL;
69517+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69518+#endif
69519+
69520 if (is_vm_hugetlb_page(vma) && (addr &
69521 ~(huge_page_mask(hstate_vma(vma)))))
69522 return -EINVAL;
69523
69524+#ifdef CONFIG_PAX_SEGMEXEC
69525+ vma_m = pax_find_mirror_vma(vma);
69526+#endif
69527+
69528 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69529 if (!new)
69530 goto out_err;
69531
69532+#ifdef CONFIG_PAX_SEGMEXEC
69533+ if (vma_m) {
69534+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69535+ if (!new_m) {
69536+ kmem_cache_free(vm_area_cachep, new);
69537+ goto out_err;
69538+ }
69539+ }
69540+#endif
69541+
69542 /* most fields are the same, copy all, and then fixup */
69543 *new = *vma;
69544
69545@@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69546 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69547 }
69548
69549+#ifdef CONFIG_PAX_SEGMEXEC
69550+ if (vma_m) {
69551+ *new_m = *vma_m;
69552+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
69553+ new_m->vm_mirror = new;
69554+ new->vm_mirror = new_m;
69555+
69556+ if (new_below)
69557+ new_m->vm_end = addr_m;
69558+ else {
69559+ new_m->vm_start = addr_m;
69560+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69561+ }
69562+ }
69563+#endif
69564+
69565 pol = mpol_dup(vma_policy(vma));
69566 if (IS_ERR(pol)) {
69567 err = PTR_ERR(pol);
69568@@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69569 else
69570 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69571
69572+#ifdef CONFIG_PAX_SEGMEXEC
69573+ if (!err && vma_m) {
69574+ if (anon_vma_clone(new_m, vma_m))
69575+ goto out_free_mpol;
69576+
69577+ mpol_get(pol);
69578+ vma_set_policy(new_m, pol);
69579+
69580+ if (new_m->vm_file) {
69581+ get_file(new_m->vm_file);
69582+ if (vma_m->vm_flags & VM_EXECUTABLE)
69583+ added_exe_file_vma(mm);
69584+ }
69585+
69586+ if (new_m->vm_ops && new_m->vm_ops->open)
69587+ new_m->vm_ops->open(new_m);
69588+
69589+ if (new_below)
69590+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69591+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69592+ else
69593+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69594+
69595+ if (err) {
69596+ if (new_m->vm_ops && new_m->vm_ops->close)
69597+ new_m->vm_ops->close(new_m);
69598+ if (new_m->vm_file) {
69599+ if (vma_m->vm_flags & VM_EXECUTABLE)
69600+ removed_exe_file_vma(mm);
69601+ fput(new_m->vm_file);
69602+ }
69603+ mpol_put(pol);
69604+ }
69605+ }
69606+#endif
69607+
69608 /* Success. */
69609 if (!err)
69610 return 0;
69611@@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69612 removed_exe_file_vma(mm);
69613 fput(new->vm_file);
69614 }
69615- unlink_anon_vmas(new);
69616 out_free_mpol:
69617 mpol_put(pol);
69618 out_free_vma:
69619+
69620+#ifdef CONFIG_PAX_SEGMEXEC
69621+ if (new_m) {
69622+ unlink_anon_vmas(new_m);
69623+ kmem_cache_free(vm_area_cachep, new_m);
69624+ }
69625+#endif
69626+
69627+ unlink_anon_vmas(new);
69628 kmem_cache_free(vm_area_cachep, new);
69629 out_err:
69630 return err;
69631@@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
69632 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69633 unsigned long addr, int new_below)
69634 {
69635+
69636+#ifdef CONFIG_PAX_SEGMEXEC
69637+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69638+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69639+ if (mm->map_count >= sysctl_max_map_count-1)
69640+ return -ENOMEM;
69641+ } else
69642+#endif
69643+
69644 if (mm->map_count >= sysctl_max_map_count)
69645 return -ENOMEM;
69646
69647@@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
69648 * work. This now handles partial unmappings.
69649 * Jeremy Fitzhardinge <jeremy@goop.org>
69650 */
69651+#ifdef CONFIG_PAX_SEGMEXEC
69652 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69653 {
69654+ int ret = __do_munmap(mm, start, len);
69655+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69656+ return ret;
69657+
69658+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69659+}
69660+
69661+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69662+#else
69663+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69664+#endif
69665+{
69666 unsigned long end;
69667 struct vm_area_struct *vma, *prev, *last;
69668
69669+ /*
69670+ * mm->mmap_sem is required to protect against another thread
69671+ * changing the mappings in case we sleep.
69672+ */
69673+ verify_mm_writelocked(mm);
69674+
69675 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69676 return -EINVAL;
69677
69678@@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69679 /* Fix up all other VM information */
69680 remove_vma_list(mm, vma);
69681
69682+ track_exec_limit(mm, start, end, 0UL);
69683+
69684 return 0;
69685 }
69686
69687@@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
69688
69689 profile_munmap(addr);
69690
69691+#ifdef CONFIG_PAX_SEGMEXEC
69692+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69693+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69694+ return -EINVAL;
69695+#endif
69696+
69697 down_write(&mm->mmap_sem);
69698 ret = do_munmap(mm, addr, len);
69699 up_write(&mm->mmap_sem);
69700 return ret;
69701 }
69702
69703-static inline void verify_mm_writelocked(struct mm_struct *mm)
69704-{
69705-#ifdef CONFIG_DEBUG_VM
69706- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69707- WARN_ON(1);
69708- up_read(&mm->mmap_sem);
69709- }
69710-#endif
69711-}
69712-
69713 /*
69714 * this is really a simplified "do_mmap". it only handles
69715 * anonymous maps. eventually we may be able to do some
69716@@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69717 struct rb_node ** rb_link, * rb_parent;
69718 pgoff_t pgoff = addr >> PAGE_SHIFT;
69719 int error;
69720+ unsigned long charged;
69721
69722 len = PAGE_ALIGN(len);
69723 if (!len)
69724@@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69725
69726 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69727
69728+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69729+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69730+ flags &= ~VM_EXEC;
69731+
69732+#ifdef CONFIG_PAX_MPROTECT
69733+ if (mm->pax_flags & MF_PAX_MPROTECT)
69734+ flags &= ~VM_MAYEXEC;
69735+#endif
69736+
69737+ }
69738+#endif
69739+
69740 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69741 if (error & ~PAGE_MASK)
69742 return error;
69743
69744+ charged = len >> PAGE_SHIFT;
69745+
69746 /*
69747 * mlock MCL_FUTURE?
69748 */
69749 if (mm->def_flags & VM_LOCKED) {
69750 unsigned long locked, lock_limit;
69751- locked = len >> PAGE_SHIFT;
69752+ locked = charged;
69753 locked += mm->locked_vm;
69754 lock_limit = rlimit(RLIMIT_MEMLOCK);
69755 lock_limit >>= PAGE_SHIFT;
69756@@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69757 /*
69758 * Clear old maps. this also does some error checking for us
69759 */
69760- munmap_back:
69761 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69762 if (vma && vma->vm_start < addr + len) {
69763 if (do_munmap(mm, addr, len))
69764 return -ENOMEM;
69765- goto munmap_back;
69766+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69767+ BUG_ON(vma && vma->vm_start < addr + len);
69768 }
69769
69770 /* Check against address space limits *after* clearing old maps... */
69771- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69772+ if (!may_expand_vm(mm, charged))
69773 return -ENOMEM;
69774
69775 if (mm->map_count > sysctl_max_map_count)
69776 return -ENOMEM;
69777
69778- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69779+ if (security_vm_enough_memory(charged))
69780 return -ENOMEM;
69781
69782 /* Can we just expand an old private anonymous mapping? */
69783@@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69784 */
69785 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69786 if (!vma) {
69787- vm_unacct_memory(len >> PAGE_SHIFT);
69788+ vm_unacct_memory(charged);
69789 return -ENOMEM;
69790 }
69791
69792@@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
69793 vma_link(mm, vma, prev, rb_link, rb_parent);
69794 out:
69795 perf_event_mmap(vma);
69796- mm->total_vm += len >> PAGE_SHIFT;
69797+ mm->total_vm += charged;
69798 if (flags & VM_LOCKED) {
69799 if (!mlock_vma_pages_range(vma, addr, addr + len))
69800- mm->locked_vm += (len >> PAGE_SHIFT);
69801+ mm->locked_vm += charged;
69802 }
69803+ track_exec_limit(mm, addr, addr + len, flags);
69804 return addr;
69805 }
69806
69807@@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
69808 * Walk the list again, actually closing and freeing it,
69809 * with preemption enabled, without holding any MM locks.
69810 */
69811- while (vma)
69812+ while (vma) {
69813+ vma->vm_mirror = NULL;
69814 vma = remove_vma(vma);
69815+ }
69816
69817 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69818 }
69819@@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69820 struct vm_area_struct * __vma, * prev;
69821 struct rb_node ** rb_link, * rb_parent;
69822
69823+#ifdef CONFIG_PAX_SEGMEXEC
69824+ struct vm_area_struct *vma_m = NULL;
69825+#endif
69826+
69827+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
69828+ return -EPERM;
69829+
69830 /*
69831 * The vm_pgoff of a purely anonymous vma should be irrelevant
69832 * until its first write fault, when page's anon_vma and index
69833@@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
69834 if ((vma->vm_flags & VM_ACCOUNT) &&
69835 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69836 return -ENOMEM;
69837+
69838+#ifdef CONFIG_PAX_SEGMEXEC
69839+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69840+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69841+ if (!vma_m)
69842+ return -ENOMEM;
69843+ }
69844+#endif
69845+
69846 vma_link(mm, vma, prev, rb_link, rb_parent);
69847+
69848+#ifdef CONFIG_PAX_SEGMEXEC
69849+ if (vma_m)
69850+ BUG_ON(pax_mirror_vma(vma_m, vma));
69851+#endif
69852+
69853 return 0;
69854 }
69855
69856@@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69857 struct rb_node **rb_link, *rb_parent;
69858 struct mempolicy *pol;
69859
69860+ BUG_ON(vma->vm_mirror);
69861+
69862 /*
69863 * If anonymous vma has not yet been faulted, update new pgoff
69864 * to match new location, to increase its chance of merging.
69865@@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
69866 return NULL;
69867 }
69868
69869+#ifdef CONFIG_PAX_SEGMEXEC
69870+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69871+{
69872+ struct vm_area_struct *prev_m;
69873+ struct rb_node **rb_link_m, *rb_parent_m;
69874+ struct mempolicy *pol_m;
69875+
69876+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69877+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69878+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69879+ *vma_m = *vma;
69880+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
69881+ if (anon_vma_clone(vma_m, vma))
69882+ return -ENOMEM;
69883+ pol_m = vma_policy(vma_m);
69884+ mpol_get(pol_m);
69885+ vma_set_policy(vma_m, pol_m);
69886+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69887+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69888+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69889+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69890+ if (vma_m->vm_file)
69891+ get_file(vma_m->vm_file);
69892+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69893+ vma_m->vm_ops->open(vma_m);
69894+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69895+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69896+ vma_m->vm_mirror = vma;
69897+ vma->vm_mirror = vma_m;
69898+ return 0;
69899+}
69900+#endif
69901+
69902 /*
69903 * Return true if the calling process may expand its vm space by the passed
69904 * number of pages
69905@@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
69906 unsigned long lim;
69907
69908 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
69909-
69910+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69911 if (cur + npages > lim)
69912 return 0;
69913 return 1;
69914@@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
69915 vma->vm_start = addr;
69916 vma->vm_end = addr + len;
69917
69918+#ifdef CONFIG_PAX_MPROTECT
69919+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69920+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69921+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69922+ return -EPERM;
69923+ if (!(vm_flags & VM_EXEC))
69924+ vm_flags &= ~VM_MAYEXEC;
69925+#else
69926+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69927+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69928+#endif
69929+ else
69930+ vm_flags &= ~VM_MAYWRITE;
69931+ }
69932+#endif
69933+
69934 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69935 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69936
69937diff --git a/mm/mprotect.c b/mm/mprotect.c
69938index 5a688a2..27e031c 100644
69939--- a/mm/mprotect.c
69940+++ b/mm/mprotect.c
69941@@ -23,10 +23,16 @@
69942 #include <linux/mmu_notifier.h>
69943 #include <linux/migrate.h>
69944 #include <linux/perf_event.h>
69945+
69946+#ifdef CONFIG_PAX_MPROTECT
69947+#include <linux/elf.h>
69948+#endif
69949+
69950 #include <asm/uaccess.h>
69951 #include <asm/pgtable.h>
69952 #include <asm/cacheflush.h>
69953 #include <asm/tlbflush.h>
69954+#include <asm/mmu_context.h>
69955
69956 #ifndef pgprot_modify
69957 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69958@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
69959 flush_tlb_range(vma, start, end);
69960 }
69961
69962+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69963+/* called while holding the mmap semaphor for writing except stack expansion */
69964+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69965+{
69966+ unsigned long oldlimit, newlimit = 0UL;
69967+
69968+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
69969+ return;
69970+
69971+ spin_lock(&mm->page_table_lock);
69972+ oldlimit = mm->context.user_cs_limit;
69973+ if ((prot & VM_EXEC) && oldlimit < end)
69974+ /* USER_CS limit moved up */
69975+ newlimit = end;
69976+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69977+ /* USER_CS limit moved down */
69978+ newlimit = start;
69979+
69980+ if (newlimit) {
69981+ mm->context.user_cs_limit = newlimit;
69982+
69983+#ifdef CONFIG_SMP
69984+ wmb();
69985+ cpus_clear(mm->context.cpu_user_cs_mask);
69986+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69987+#endif
69988+
69989+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69990+ }
69991+ spin_unlock(&mm->page_table_lock);
69992+ if (newlimit == end) {
69993+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
69994+
69995+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
69996+ if (is_vm_hugetlb_page(vma))
69997+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
69998+ else
69999+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70000+ }
70001+}
70002+#endif
70003+
70004 int
70005 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70006 unsigned long start, unsigned long end, unsigned long newflags)
70007@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70008 int error;
70009 int dirty_accountable = 0;
70010
70011+#ifdef CONFIG_PAX_SEGMEXEC
70012+ struct vm_area_struct *vma_m = NULL;
70013+ unsigned long start_m, end_m;
70014+
70015+ start_m = start + SEGMEXEC_TASK_SIZE;
70016+ end_m = end + SEGMEXEC_TASK_SIZE;
70017+#endif
70018+
70019 if (newflags == oldflags) {
70020 *pprev = vma;
70021 return 0;
70022 }
70023
70024+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70025+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70026+
70027+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70028+ return -ENOMEM;
70029+
70030+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70031+ return -ENOMEM;
70032+ }
70033+
70034 /*
70035 * If we make a private mapping writable we increase our commit;
70036 * but (without finer accounting) cannot reduce our commit if we
70037@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70038 }
70039 }
70040
70041+#ifdef CONFIG_PAX_SEGMEXEC
70042+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70043+ if (start != vma->vm_start) {
70044+ error = split_vma(mm, vma, start, 1);
70045+ if (error)
70046+ goto fail;
70047+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70048+ *pprev = (*pprev)->vm_next;
70049+ }
70050+
70051+ if (end != vma->vm_end) {
70052+ error = split_vma(mm, vma, end, 0);
70053+ if (error)
70054+ goto fail;
70055+ }
70056+
70057+ if (pax_find_mirror_vma(vma)) {
70058+ error = __do_munmap(mm, start_m, end_m - start_m);
70059+ if (error)
70060+ goto fail;
70061+ } else {
70062+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70063+ if (!vma_m) {
70064+ error = -ENOMEM;
70065+ goto fail;
70066+ }
70067+ vma->vm_flags = newflags;
70068+ error = pax_mirror_vma(vma_m, vma);
70069+ if (error) {
70070+ vma->vm_flags = oldflags;
70071+ goto fail;
70072+ }
70073+ }
70074+ }
70075+#endif
70076+
70077 /*
70078 * First try to merge with previous and/or next vma.
70079 */
70080@@ -204,9 +306,21 @@ success:
70081 * vm_flags and vm_page_prot are protected by the mmap_sem
70082 * held in write mode.
70083 */
70084+
70085+#ifdef CONFIG_PAX_SEGMEXEC
70086+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70087+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70088+#endif
70089+
70090 vma->vm_flags = newflags;
70091+
70092+#ifdef CONFIG_PAX_MPROTECT
70093+ if (mm->binfmt && mm->binfmt->handle_mprotect)
70094+ mm->binfmt->handle_mprotect(vma, newflags);
70095+#endif
70096+
70097 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70098- vm_get_page_prot(newflags));
70099+ vm_get_page_prot(vma->vm_flags));
70100
70101 if (vma_wants_writenotify(vma)) {
70102 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70103@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70104 end = start + len;
70105 if (end <= start)
70106 return -ENOMEM;
70107+
70108+#ifdef CONFIG_PAX_SEGMEXEC
70109+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70110+ if (end > SEGMEXEC_TASK_SIZE)
70111+ return -EINVAL;
70112+ } else
70113+#endif
70114+
70115+ if (end > TASK_SIZE)
70116+ return -EINVAL;
70117+
70118 if (!arch_validate_prot(prot))
70119 return -EINVAL;
70120
70121@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70122 /*
70123 * Does the application expect PROT_READ to imply PROT_EXEC:
70124 */
70125- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70126+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70127 prot |= PROT_EXEC;
70128
70129 vm_flags = calc_vm_prot_bits(prot);
70130@@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70131 if (start > vma->vm_start)
70132 prev = vma;
70133
70134+#ifdef CONFIG_PAX_MPROTECT
70135+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70136+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
70137+#endif
70138+
70139 for (nstart = start ; ; ) {
70140 unsigned long newflags;
70141
70142@@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70143
70144 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70145 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70146+ if (prot & (PROT_WRITE | PROT_EXEC))
70147+ gr_log_rwxmprotect(vma->vm_file);
70148+
70149+ error = -EACCES;
70150+ goto out;
70151+ }
70152+
70153+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70154 error = -EACCES;
70155 goto out;
70156 }
70157@@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
70158 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70159 if (error)
70160 goto out;
70161+
70162+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
70163+
70164 nstart = tmp;
70165
70166 if (nstart < prev->vm_end)
70167diff --git a/mm/mremap.c b/mm/mremap.c
70168index d6959cb..18a402a 100644
70169--- a/mm/mremap.c
70170+++ b/mm/mremap.c
70171@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
70172 continue;
70173 pte = ptep_get_and_clear(mm, old_addr, old_pte);
70174 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70175+
70176+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70177+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70178+ pte = pte_exprotect(pte);
70179+#endif
70180+
70181 set_pte_at(mm, new_addr, new_pte, pte);
70182 }
70183
70184@@ -290,6 +296,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
70185 if (is_vm_hugetlb_page(vma))
70186 goto Einval;
70187
70188+#ifdef CONFIG_PAX_SEGMEXEC
70189+ if (pax_find_mirror_vma(vma))
70190+ goto Einval;
70191+#endif
70192+
70193 /* We can't remap across vm area boundaries */
70194 if (old_len > vma->vm_end - addr)
70195 goto Efault;
70196@@ -346,20 +357,25 @@ static unsigned long mremap_to(unsigned long addr,
70197 unsigned long ret = -EINVAL;
70198 unsigned long charged = 0;
70199 unsigned long map_flags;
70200+ unsigned long pax_task_size = TASK_SIZE;
70201
70202 if (new_addr & ~PAGE_MASK)
70203 goto out;
70204
70205- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70206+#ifdef CONFIG_PAX_SEGMEXEC
70207+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70208+ pax_task_size = SEGMEXEC_TASK_SIZE;
70209+#endif
70210+
70211+ pax_task_size -= PAGE_SIZE;
70212+
70213+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70214 goto out;
70215
70216 /* Check if the location we're moving into overlaps the
70217 * old location at all, and fail if it does.
70218 */
70219- if ((new_addr <= addr) && (new_addr+new_len) > addr)
70220- goto out;
70221-
70222- if ((addr <= new_addr) && (addr+old_len) > new_addr)
70223+ if (addr + old_len > new_addr && new_addr + new_len > addr)
70224 goto out;
70225
70226 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70227@@ -431,6 +447,7 @@ unsigned long do_mremap(unsigned long addr,
70228 struct vm_area_struct *vma;
70229 unsigned long ret = -EINVAL;
70230 unsigned long charged = 0;
70231+ unsigned long pax_task_size = TASK_SIZE;
70232
70233 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70234 goto out;
70235@@ -449,6 +466,17 @@ unsigned long do_mremap(unsigned long addr,
70236 if (!new_len)
70237 goto out;
70238
70239+#ifdef CONFIG_PAX_SEGMEXEC
70240+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70241+ pax_task_size = SEGMEXEC_TASK_SIZE;
70242+#endif
70243+
70244+ pax_task_size -= PAGE_SIZE;
70245+
70246+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70247+ old_len > pax_task_size || addr > pax_task_size-old_len)
70248+ goto out;
70249+
70250 if (flags & MREMAP_FIXED) {
70251 if (flags & MREMAP_MAYMOVE)
70252 ret = mremap_to(addr, old_len, new_addr, new_len);
70253@@ -498,6 +526,7 @@ unsigned long do_mremap(unsigned long addr,
70254 addr + new_len);
70255 }
70256 ret = addr;
70257+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70258 goto out;
70259 }
70260 }
70261@@ -524,7 +553,13 @@ unsigned long do_mremap(unsigned long addr,
70262 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70263 if (ret)
70264 goto out;
70265+
70266+ map_flags = vma->vm_flags;
70267 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70268+ if (!(ret & ~PAGE_MASK)) {
70269+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70270+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70271+ }
70272 }
70273 out:
70274 if (ret & ~PAGE_MASK)
70275diff --git a/mm/nobootmem.c b/mm/nobootmem.c
70276index 7fa41b4..6087460 100644
70277--- a/mm/nobootmem.c
70278+++ b/mm/nobootmem.c
70279@@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
70280 unsigned long __init free_all_memory_core_early(int nodeid)
70281 {
70282 int i;
70283- u64 start, end;
70284+ u64 start, end, startrange, endrange;
70285 unsigned long count = 0;
70286- struct range *range = NULL;
70287+ struct range *range = NULL, rangerange = { 0, 0 };
70288 int nr_range;
70289
70290 nr_range = get_free_all_memory_range(&range, nodeid);
70291+ startrange = __pa(range) >> PAGE_SHIFT;
70292+ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
70293
70294 for (i = 0; i < nr_range; i++) {
70295 start = range[i].start;
70296 end = range[i].end;
70297+ if (start <= endrange && startrange < end) {
70298+ BUG_ON(rangerange.start | rangerange.end);
70299+ rangerange = range[i];
70300+ continue;
70301+ }
70302 count += end - start;
70303 __free_pages_memory(start, end);
70304 }
70305+ start = rangerange.start;
70306+ end = rangerange.end;
70307+ count += end - start;
70308+ __free_pages_memory(start, end);
70309
70310 return count;
70311 }
70312diff --git a/mm/nommu.c b/mm/nommu.c
70313index b982290..7d73f53 100644
70314--- a/mm/nommu.c
70315+++ b/mm/nommu.c
70316@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
70317 int sysctl_overcommit_ratio = 50; /* default is 50% */
70318 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70319 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70320-int heap_stack_gap = 0;
70321
70322 atomic_long_t mmap_pages_allocated;
70323
70324@@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
70325 EXPORT_SYMBOL(find_vma);
70326
70327 /*
70328- * find a VMA
70329- * - we don't extend stack VMAs under NOMMU conditions
70330- */
70331-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70332-{
70333- return find_vma(mm, addr);
70334-}
70335-
70336-/*
70337 * expand a stack to a given address
70338 * - not supported under NOMMU conditions
70339 */
70340@@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
70341
70342 /* most fields are the same, copy all, and then fixup */
70343 *new = *vma;
70344+ INIT_LIST_HEAD(&new->anon_vma_chain);
70345 *region = *vma->vm_region;
70346 new->vm_region = region;
70347
70348diff --git a/mm/page_alloc.c b/mm/page_alloc.c
70349index 485be89..c059ad3 100644
70350--- a/mm/page_alloc.c
70351+++ b/mm/page_alloc.c
70352@@ -341,7 +341,7 @@ out:
70353 * This usage means that zero-order pages may not be compound.
70354 */
70355
70356-static void free_compound_page(struct page *page)
70357+void free_compound_page(struct page *page)
70358 {
70359 __free_pages_ok(page, compound_order(page));
70360 }
70361@@ -654,6 +654,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70362 int i;
70363 int bad = 0;
70364
70365+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70366+ unsigned long index = 1UL << order;
70367+#endif
70368+
70369 trace_mm_page_free_direct(page, order);
70370 kmemcheck_free_shadow(page, order);
70371
70372@@ -669,6 +673,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
70373 debug_check_no_obj_freed(page_address(page),
70374 PAGE_SIZE << order);
70375 }
70376+
70377+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70378+ for (; index; --index)
70379+ sanitize_highpage(page + index - 1);
70380+#endif
70381+
70382 arch_free_page(page, order);
70383 kernel_map_pages(page, 1 << order, 0);
70384
70385@@ -784,8 +794,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
70386 arch_alloc_page(page, order);
70387 kernel_map_pages(page, 1 << order, 1);
70388
70389+#ifndef CONFIG_PAX_MEMORY_SANITIZE
70390 if (gfp_flags & __GFP_ZERO)
70391 prep_zero_page(page, order, gfp_flags);
70392+#endif
70393
70394 if (order && (gfp_flags & __GFP_COMP))
70395 prep_compound_page(page, order);
70396@@ -3357,7 +3369,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
70397 unsigned long pfn;
70398
70399 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
70400+#ifdef CONFIG_X86_32
70401+ /* boot failures in VMware 8 on 32bit vanilla since
70402+ this change */
70403+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
70404+#else
70405 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
70406+#endif
70407 return 1;
70408 }
70409 return 0;
70410diff --git a/mm/percpu.c b/mm/percpu.c
70411index 716eb4a..8d10419 100644
70412--- a/mm/percpu.c
70413+++ b/mm/percpu.c
70414@@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
70415 static unsigned int pcpu_high_unit_cpu __read_mostly;
70416
70417 /* the address of the first chunk which starts with the kernel static area */
70418-void *pcpu_base_addr __read_mostly;
70419+void *pcpu_base_addr __read_only;
70420 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70421
70422 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70423diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
70424index e920aa3..78fe584 100644
70425--- a/mm/process_vm_access.c
70426+++ b/mm/process_vm_access.c
70427@@ -13,6 +13,7 @@
70428 #include <linux/uio.h>
70429 #include <linux/sched.h>
70430 #include <linux/highmem.h>
70431+#include <linux/security.h>
70432 #include <linux/ptrace.h>
70433 #include <linux/slab.h>
70434 #include <linux/syscalls.h>
70435@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70436 size_t iov_l_curr_offset = 0;
70437 ssize_t iov_len;
70438
70439+ return -ENOSYS; // PaX: until properly audited
70440+
70441 /*
70442 * Work out how many pages of struct pages we're going to need
70443 * when eventually calling get_user_pages
70444 */
70445 for (i = 0; i < riovcnt; i++) {
70446 iov_len = rvec[i].iov_len;
70447- if (iov_len > 0) {
70448- nr_pages_iov = ((unsigned long)rvec[i].iov_base
70449- + iov_len)
70450- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
70451- / PAGE_SIZE + 1;
70452- nr_pages = max(nr_pages, nr_pages_iov);
70453- }
70454+ if (iov_len <= 0)
70455+ continue;
70456+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
70457+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
70458+ nr_pages = max(nr_pages, nr_pages_iov);
70459 }
70460
70461 if (nr_pages == 0)
70462@@ -298,8 +299,13 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
70463 goto free_proc_pages;
70464 }
70465
70466+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
70467+ rc = -EPERM;
70468+ goto put_task_struct;
70469+ }
70470+
70471 task_lock(task);
70472- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
70473+ if (ptrace_may_access_nolock(task, PTRACE_MODE_ATTACH)) {
70474 task_unlock(task);
70475 rc = -EPERM;
70476 goto put_task_struct;
70477diff --git a/mm/rmap.c b/mm/rmap.c
70478index a4fd368..e0ffec7 100644
70479--- a/mm/rmap.c
70480+++ b/mm/rmap.c
70481@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70482 struct anon_vma *anon_vma = vma->anon_vma;
70483 struct anon_vma_chain *avc;
70484
70485+#ifdef CONFIG_PAX_SEGMEXEC
70486+ struct anon_vma_chain *avc_m = NULL;
70487+#endif
70488+
70489 might_sleep();
70490 if (unlikely(!anon_vma)) {
70491 struct mm_struct *mm = vma->vm_mm;
70492@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70493 if (!avc)
70494 goto out_enomem;
70495
70496+#ifdef CONFIG_PAX_SEGMEXEC
70497+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
70498+ if (!avc_m)
70499+ goto out_enomem_free_avc;
70500+#endif
70501+
70502 anon_vma = find_mergeable_anon_vma(vma);
70503 allocated = NULL;
70504 if (!anon_vma) {
70505@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70506 /* page_table_lock to protect against threads */
70507 spin_lock(&mm->page_table_lock);
70508 if (likely(!vma->anon_vma)) {
70509+
70510+#ifdef CONFIG_PAX_SEGMEXEC
70511+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70512+
70513+ if (vma_m) {
70514+ BUG_ON(vma_m->anon_vma);
70515+ vma_m->anon_vma = anon_vma;
70516+ avc_m->anon_vma = anon_vma;
70517+ avc_m->vma = vma;
70518+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
70519+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
70520+ avc_m = NULL;
70521+ }
70522+#endif
70523+
70524 vma->anon_vma = anon_vma;
70525 avc->anon_vma = anon_vma;
70526 avc->vma = vma;
70527@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
70528
70529 if (unlikely(allocated))
70530 put_anon_vma(allocated);
70531+
70532+#ifdef CONFIG_PAX_SEGMEXEC
70533+ if (unlikely(avc_m))
70534+ anon_vma_chain_free(avc_m);
70535+#endif
70536+
70537 if (unlikely(avc))
70538 anon_vma_chain_free(avc);
70539 }
70540 return 0;
70541
70542 out_enomem_free_avc:
70543+
70544+#ifdef CONFIG_PAX_SEGMEXEC
70545+ if (avc_m)
70546+ anon_vma_chain_free(avc_m);
70547+#endif
70548+
70549 anon_vma_chain_free(avc);
70550 out_enomem:
70551 return -ENOMEM;
70552@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
70553 * Attach the anon_vmas from src to dst.
70554 * Returns 0 on success, -ENOMEM on failure.
70555 */
70556-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70557+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
70558 {
70559 struct anon_vma_chain *avc, *pavc;
70560 struct anon_vma *root = NULL;
70561@@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
70562 * the corresponding VMA in the parent process is attached to.
70563 * Returns 0 on success, non-zero on failure.
70564 */
70565-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
70566+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
70567 {
70568 struct anon_vma_chain *avc;
70569 struct anon_vma *anon_vma;
70570diff --git a/mm/shmem.c b/mm/shmem.c
70571index 6c253f7..367e20a 100644
70572--- a/mm/shmem.c
70573+++ b/mm/shmem.c
70574@@ -31,7 +31,7 @@
70575 #include <linux/export.h>
70576 #include <linux/swap.h>
70577
70578-static struct vfsmount *shm_mnt;
70579+struct vfsmount *shm_mnt;
70580
70581 #ifdef CONFIG_SHMEM
70582 /*
70583@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
70584 #define BOGO_DIRENT_SIZE 20
70585
70586 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
70587-#define SHORT_SYMLINK_LEN 128
70588+#define SHORT_SYMLINK_LEN 64
70589
70590 struct shmem_xattr {
70591 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
70592@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
70593 int err = -ENOMEM;
70594
70595 /* Round up to L1_CACHE_BYTES to resist false sharing */
70596- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70597- L1_CACHE_BYTES), GFP_KERNEL);
70598+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70599 if (!sbinfo)
70600 return -ENOMEM;
70601
70602diff --git a/mm/slab.c b/mm/slab.c
70603index 83311c9a..fcf8f86 100644
70604--- a/mm/slab.c
70605+++ b/mm/slab.c
70606@@ -151,7 +151,7 @@
70607
70608 /* Legal flag mask for kmem_cache_create(). */
70609 #if DEBUG
70610-# define CREATE_MASK (SLAB_RED_ZONE | \
70611+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70612 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70613 SLAB_CACHE_DMA | \
70614 SLAB_STORE_USER | \
70615@@ -159,7 +159,7 @@
70616 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70617 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70618 #else
70619-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70620+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70621 SLAB_CACHE_DMA | \
70622 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70623 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70624@@ -288,7 +288,7 @@ struct kmem_list3 {
70625 * Need this for bootstrapping a per node allocator.
70626 */
70627 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70628-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70629+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70630 #define CACHE_CACHE 0
70631 #define SIZE_AC MAX_NUMNODES
70632 #define SIZE_L3 (2 * MAX_NUMNODES)
70633@@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
70634 if ((x)->max_freeable < i) \
70635 (x)->max_freeable = i; \
70636 } while (0)
70637-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70638-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70639-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70640-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70641+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70642+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70643+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70644+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70645 #else
70646 #define STATS_INC_ACTIVE(x) do { } while (0)
70647 #define STATS_DEC_ACTIVE(x) do { } while (0)
70648@@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
70649 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70650 */
70651 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70652- const struct slab *slab, void *obj)
70653+ const struct slab *slab, const void *obj)
70654 {
70655 u32 offset = (obj - slab->s_mem);
70656 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70657@@ -564,7 +564,7 @@ struct cache_names {
70658 static struct cache_names __initdata cache_names[] = {
70659 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
70660 #include <linux/kmalloc_sizes.h>
70661- {NULL,}
70662+ {NULL}
70663 #undef CACHE
70664 };
70665
70666@@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
70667 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70668 sizes[INDEX_AC].cs_size,
70669 ARCH_KMALLOC_MINALIGN,
70670- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70671+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70672 NULL);
70673
70674 if (INDEX_AC != INDEX_L3) {
70675@@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
70676 kmem_cache_create(names[INDEX_L3].name,
70677 sizes[INDEX_L3].cs_size,
70678 ARCH_KMALLOC_MINALIGN,
70679- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70680+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70681 NULL);
70682 }
70683
70684@@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
70685 sizes->cs_cachep = kmem_cache_create(names->name,
70686 sizes->cs_size,
70687 ARCH_KMALLOC_MINALIGN,
70688- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70689+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70690 NULL);
70691 }
70692 #ifdef CONFIG_ZONE_DMA
70693@@ -4322,10 +4322,10 @@ static int s_show(struct seq_file *m, void *p)
70694 }
70695 /* cpu stats */
70696 {
70697- unsigned long allochit = atomic_read(&cachep->allochit);
70698- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70699- unsigned long freehit = atomic_read(&cachep->freehit);
70700- unsigned long freemiss = atomic_read(&cachep->freemiss);
70701+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70702+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70703+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70704+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70705
70706 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70707 allochit, allocmiss, freehit, freemiss);
70708@@ -4584,13 +4584,62 @@ static int __init slab_proc_init(void)
70709 {
70710 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
70711 #ifdef CONFIG_DEBUG_SLAB_LEAK
70712- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70713+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
70714 #endif
70715 return 0;
70716 }
70717 module_init(slab_proc_init);
70718 #endif
70719
70720+void check_object_size(const void *ptr, unsigned long n, bool to)
70721+{
70722+
70723+#ifdef CONFIG_PAX_USERCOPY
70724+ struct page *page;
70725+ struct kmem_cache *cachep = NULL;
70726+ struct slab *slabp;
70727+ unsigned int objnr;
70728+ unsigned long offset;
70729+ const char *type;
70730+
70731+ if (!n)
70732+ return;
70733+
70734+ type = "<null>";
70735+ if (ZERO_OR_NULL_PTR(ptr))
70736+ goto report;
70737+
70738+ if (!virt_addr_valid(ptr))
70739+ return;
70740+
70741+ page = virt_to_head_page(ptr);
70742+
70743+ type = "<process stack>";
70744+ if (!PageSlab(page)) {
70745+ if (object_is_on_stack(ptr, n) == -1)
70746+ goto report;
70747+ return;
70748+ }
70749+
70750+ cachep = page_get_cache(page);
70751+ type = cachep->name;
70752+ if (!(cachep->flags & SLAB_USERCOPY))
70753+ goto report;
70754+
70755+ slabp = page_get_slab(page);
70756+ objnr = obj_to_index(cachep, slabp, ptr);
70757+ BUG_ON(objnr >= cachep->num);
70758+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70759+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70760+ return;
70761+
70762+report:
70763+ pax_report_usercopy(ptr, n, to, type);
70764+#endif
70765+
70766+}
70767+EXPORT_SYMBOL(check_object_size);
70768+
70769 /**
70770 * ksize - get the actual amount of memory allocated for a given object
70771 * @objp: Pointer to the object
70772diff --git a/mm/slob.c b/mm/slob.c
70773index 8105be4..579da9d 100644
70774--- a/mm/slob.c
70775+++ b/mm/slob.c
70776@@ -29,7 +29,7 @@
70777 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70778 * alloc_pages() directly, allocating compound pages so the page order
70779 * does not have to be separately tracked, and also stores the exact
70780- * allocation size in page->private so that it can be used to accurately
70781+ * allocation size in slob_page->size so that it can be used to accurately
70782 * provide ksize(). These objects are detected in kfree() because slob_page()
70783 * is false for them.
70784 *
70785@@ -58,6 +58,7 @@
70786 */
70787
70788 #include <linux/kernel.h>
70789+#include <linux/sched.h>
70790 #include <linux/slab.h>
70791 #include <linux/mm.h>
70792 #include <linux/swap.h> /* struct reclaim_state */
70793@@ -102,7 +103,8 @@ struct slob_page {
70794 unsigned long flags; /* mandatory */
70795 atomic_t _count; /* mandatory */
70796 slobidx_t units; /* free units left in page */
70797- unsigned long pad[2];
70798+ unsigned long pad[1];
70799+ unsigned long size; /* size when >=PAGE_SIZE */
70800 slob_t *free; /* first free slob_t in page */
70801 struct list_head list; /* linked list of free pages */
70802 };
70803@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
70804 */
70805 static inline int is_slob_page(struct slob_page *sp)
70806 {
70807- return PageSlab((struct page *)sp);
70808+ return PageSlab((struct page *)sp) && !sp->size;
70809 }
70810
70811 static inline void set_slob_page(struct slob_page *sp)
70812@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
70813
70814 static inline struct slob_page *slob_page(const void *addr)
70815 {
70816- return (struct slob_page *)virt_to_page(addr);
70817+ return (struct slob_page *)virt_to_head_page(addr);
70818 }
70819
70820 /*
70821@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
70822 /*
70823 * Return the size of a slob block.
70824 */
70825-static slobidx_t slob_units(slob_t *s)
70826+static slobidx_t slob_units(const slob_t *s)
70827 {
70828 if (s->units > 0)
70829 return s->units;
70830@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
70831 /*
70832 * Return the next free slob block pointer after this one.
70833 */
70834-static slob_t *slob_next(slob_t *s)
70835+static slob_t *slob_next(const slob_t *s)
70836 {
70837 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70838 slobidx_t next;
70839@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
70840 /*
70841 * Returns true if s is the last free block in its page.
70842 */
70843-static int slob_last(slob_t *s)
70844+static int slob_last(const slob_t *s)
70845 {
70846 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70847 }
70848@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
70849 if (!page)
70850 return NULL;
70851
70852+ set_slob_page(page);
70853 return page_address(page);
70854 }
70855
70856@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
70857 if (!b)
70858 return NULL;
70859 sp = slob_page(b);
70860- set_slob_page(sp);
70861
70862 spin_lock_irqsave(&slob_lock, flags);
70863 sp->units = SLOB_UNITS(PAGE_SIZE);
70864 sp->free = b;
70865+ sp->size = 0;
70866 INIT_LIST_HEAD(&sp->list);
70867 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70868 set_slob_page_free(sp, slob_list);
70869@@ -476,10 +479,9 @@ out:
70870 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
70871 */
70872
70873-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70874+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70875 {
70876- unsigned int *m;
70877- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70878+ slob_t *m;
70879 void *ret;
70880
70881 gfp &= gfp_allowed_mask;
70882@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70883
70884 if (!m)
70885 return NULL;
70886- *m = size;
70887+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70888+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70889+ m[0].units = size;
70890+ m[1].units = align;
70891 ret = (void *)m + align;
70892
70893 trace_kmalloc_node(_RET_IP_, ret,
70894@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70895 gfp |= __GFP_COMP;
70896 ret = slob_new_pages(gfp, order, node);
70897 if (ret) {
70898- struct page *page;
70899- page = virt_to_page(ret);
70900- page->private = size;
70901+ struct slob_page *sp;
70902+ sp = slob_page(ret);
70903+ sp->size = size;
70904 }
70905
70906 trace_kmalloc_node(_RET_IP_, ret,
70907 size, PAGE_SIZE << order, gfp, node);
70908 }
70909
70910- kmemleak_alloc(ret, size, 1, gfp);
70911+ return ret;
70912+}
70913+
70914+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70915+{
70916+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70917+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70918+
70919+ if (!ZERO_OR_NULL_PTR(ret))
70920+ kmemleak_alloc(ret, size, 1, gfp);
70921 return ret;
70922 }
70923 EXPORT_SYMBOL(__kmalloc_node);
70924@@ -533,13 +547,92 @@ void kfree(const void *block)
70925 sp = slob_page(block);
70926 if (is_slob_page(sp)) {
70927 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70928- unsigned int *m = (unsigned int *)(block - align);
70929- slob_free(m, *m + align);
70930- } else
70931+ slob_t *m = (slob_t *)(block - align);
70932+ slob_free(m, m[0].units + align);
70933+ } else {
70934+ clear_slob_page(sp);
70935+ free_slob_page(sp);
70936+ sp->size = 0;
70937 put_page(&sp->page);
70938+ }
70939 }
70940 EXPORT_SYMBOL(kfree);
70941
70942+void check_object_size(const void *ptr, unsigned long n, bool to)
70943+{
70944+
70945+#ifdef CONFIG_PAX_USERCOPY
70946+ struct slob_page *sp;
70947+ const slob_t *free;
70948+ const void *base;
70949+ unsigned long flags;
70950+ const char *type;
70951+
70952+ if (!n)
70953+ return;
70954+
70955+ type = "<null>";
70956+ if (ZERO_OR_NULL_PTR(ptr))
70957+ goto report;
70958+
70959+ if (!virt_addr_valid(ptr))
70960+ return;
70961+
70962+ type = "<process stack>";
70963+ sp = slob_page(ptr);
70964+ if (!PageSlab((struct page*)sp)) {
70965+ if (object_is_on_stack(ptr, n) == -1)
70966+ goto report;
70967+ return;
70968+ }
70969+
70970+ type = "<slob>";
70971+ if (sp->size) {
70972+ base = page_address(&sp->page);
70973+ if (base <= ptr && n <= sp->size - (ptr - base))
70974+ return;
70975+ goto report;
70976+ }
70977+
70978+ /* some tricky double walking to find the chunk */
70979+ spin_lock_irqsave(&slob_lock, flags);
70980+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70981+ free = sp->free;
70982+
70983+ while (!slob_last(free) && (void *)free <= ptr) {
70984+ base = free + slob_units(free);
70985+ free = slob_next(free);
70986+ }
70987+
70988+ while (base < (void *)free) {
70989+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70990+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70991+ int offset;
70992+
70993+ if (ptr < base + align)
70994+ break;
70995+
70996+ offset = ptr - base - align;
70997+ if (offset >= m) {
70998+ base += size;
70999+ continue;
71000+ }
71001+
71002+ if (n > m - offset)
71003+ break;
71004+
71005+ spin_unlock_irqrestore(&slob_lock, flags);
71006+ return;
71007+ }
71008+
71009+ spin_unlock_irqrestore(&slob_lock, flags);
71010+report:
71011+ pax_report_usercopy(ptr, n, to, type);
71012+#endif
71013+
71014+}
71015+EXPORT_SYMBOL(check_object_size);
71016+
71017 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
71018 size_t ksize(const void *block)
71019 {
71020@@ -552,10 +645,10 @@ size_t ksize(const void *block)
71021 sp = slob_page(block);
71022 if (is_slob_page(sp)) {
71023 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
71024- unsigned int *m = (unsigned int *)(block - align);
71025- return SLOB_UNITS(*m) * SLOB_UNIT;
71026+ slob_t *m = (slob_t *)(block - align);
71027+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
71028 } else
71029- return sp->page.private;
71030+ return sp->size;
71031 }
71032 EXPORT_SYMBOL(ksize);
71033
71034@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71035 {
71036 struct kmem_cache *c;
71037
71038+#ifdef CONFIG_PAX_USERCOPY
71039+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
71040+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
71041+#else
71042 c = slob_alloc(sizeof(struct kmem_cache),
71043 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
71044+#endif
71045
71046 if (c) {
71047 c->name = name;
71048@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
71049
71050 lockdep_trace_alloc(flags);
71051
71052+#ifdef CONFIG_PAX_USERCOPY
71053+ b = __kmalloc_node_align(c->size, flags, node, c->align);
71054+#else
71055 if (c->size < PAGE_SIZE) {
71056 b = slob_alloc(c->size, flags, c->align, node);
71057 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71058 SLOB_UNITS(c->size) * SLOB_UNIT,
71059 flags, node);
71060 } else {
71061+ struct slob_page *sp;
71062+
71063 b = slob_new_pages(flags, get_order(c->size), node);
71064+ sp = slob_page(b);
71065+ sp->size = c->size;
71066 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
71067 PAGE_SIZE << get_order(c->size),
71068 flags, node);
71069 }
71070+#endif
71071
71072 if (c->ctor)
71073 c->ctor(b);
71074@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
71075
71076 static void __kmem_cache_free(void *b, int size)
71077 {
71078- if (size < PAGE_SIZE)
71079+ struct slob_page *sp = slob_page(b);
71080+
71081+ if (is_slob_page(sp))
71082 slob_free(b, size);
71083- else
71084+ else {
71085+ clear_slob_page(sp);
71086+ free_slob_page(sp);
71087+ sp->size = 0;
71088 slob_free_pages(b, get_order(size));
71089+ }
71090 }
71091
71092 static void kmem_rcu_free(struct rcu_head *head)
71093@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
71094
71095 void kmem_cache_free(struct kmem_cache *c, void *b)
71096 {
71097+ int size = c->size;
71098+
71099+#ifdef CONFIG_PAX_USERCOPY
71100+ if (size + c->align < PAGE_SIZE) {
71101+ size += c->align;
71102+ b -= c->align;
71103+ }
71104+#endif
71105+
71106 kmemleak_free_recursive(b, c->flags);
71107 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
71108 struct slob_rcu *slob_rcu;
71109- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
71110- slob_rcu->size = c->size;
71111+ slob_rcu = b + (size - sizeof(struct slob_rcu));
71112+ slob_rcu->size = size;
71113 call_rcu(&slob_rcu->head, kmem_rcu_free);
71114 } else {
71115- __kmem_cache_free(b, c->size);
71116+ __kmem_cache_free(b, size);
71117 }
71118
71119+#ifdef CONFIG_PAX_USERCOPY
71120+ trace_kfree(_RET_IP_, b);
71121+#else
71122 trace_kmem_cache_free(_RET_IP_, b);
71123+#endif
71124+
71125 }
71126 EXPORT_SYMBOL(kmem_cache_free);
71127
71128diff --git a/mm/slub.c b/mm/slub.c
71129index 1a919f0..1739c9b 100644
71130--- a/mm/slub.c
71131+++ b/mm/slub.c
71132@@ -208,7 +208,7 @@ struct track {
71133
71134 enum track_item { TRACK_ALLOC, TRACK_FREE };
71135
71136-#ifdef CONFIG_SYSFS
71137+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71138 static int sysfs_slab_add(struct kmem_cache *);
71139 static int sysfs_slab_alias(struct kmem_cache *, const char *);
71140 static void sysfs_slab_remove(struct kmem_cache *);
71141@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
71142 if (!t->addr)
71143 return;
71144
71145- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
71146+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
71147 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
71148 #ifdef CONFIG_STACKTRACE
71149 {
71150@@ -2559,6 +2559,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
71151
71152 page = virt_to_head_page(x);
71153
71154+ BUG_ON(!PageSlab(page));
71155+
71156 slab_free(s, page, x, _RET_IP_);
71157
71158 trace_kmem_cache_free(_RET_IP_, x);
71159@@ -2592,7 +2594,7 @@ static int slub_min_objects;
71160 * Merge control. If this is set then no merging of slab caches will occur.
71161 * (Could be removed. This was introduced to pacify the merge skeptics.)
71162 */
71163-static int slub_nomerge;
71164+static int slub_nomerge = 1;
71165
71166 /*
71167 * Calculate the order of allocation given an slab object size.
71168@@ -3042,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
71169 else
71170 s->cpu_partial = 30;
71171
71172- s->refcount = 1;
71173+ atomic_set(&s->refcount, 1);
71174 #ifdef CONFIG_NUMA
71175 s->remote_node_defrag_ratio = 1000;
71176 #endif
71177@@ -3146,8 +3148,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
71178 void kmem_cache_destroy(struct kmem_cache *s)
71179 {
71180 down_write(&slub_lock);
71181- s->refcount--;
71182- if (!s->refcount) {
71183+ if (atomic_dec_and_test(&s->refcount)) {
71184 list_del(&s->list);
71185 up_write(&slub_lock);
71186 if (kmem_cache_close(s)) {
71187@@ -3358,6 +3359,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
71188 EXPORT_SYMBOL(__kmalloc_node);
71189 #endif
71190
71191+void check_object_size(const void *ptr, unsigned long n, bool to)
71192+{
71193+
71194+#ifdef CONFIG_PAX_USERCOPY
71195+ struct page *page;
71196+ struct kmem_cache *s = NULL;
71197+ unsigned long offset;
71198+ const char *type;
71199+
71200+ if (!n)
71201+ return;
71202+
71203+ type = "<null>";
71204+ if (ZERO_OR_NULL_PTR(ptr))
71205+ goto report;
71206+
71207+ if (!virt_addr_valid(ptr))
71208+ return;
71209+
71210+ page = virt_to_head_page(ptr);
71211+
71212+ type = "<process stack>";
71213+ if (!PageSlab(page)) {
71214+ if (object_is_on_stack(ptr, n) == -1)
71215+ goto report;
71216+ return;
71217+ }
71218+
71219+ s = page->slab;
71220+ type = s->name;
71221+ if (!(s->flags & SLAB_USERCOPY))
71222+ goto report;
71223+
71224+ offset = (ptr - page_address(page)) % s->size;
71225+ if (offset <= s->objsize && n <= s->objsize - offset)
71226+ return;
71227+
71228+report:
71229+ pax_report_usercopy(ptr, n, to, type);
71230+#endif
71231+
71232+}
71233+EXPORT_SYMBOL(check_object_size);
71234+
71235 size_t ksize(const void *object)
71236 {
71237 struct page *page;
71238@@ -3632,7 +3677,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
71239 int node;
71240
71241 list_add(&s->list, &slab_caches);
71242- s->refcount = -1;
71243+ atomic_set(&s->refcount, -1);
71244
71245 for_each_node_state(node, N_NORMAL_MEMORY) {
71246 struct kmem_cache_node *n = get_node(s, node);
71247@@ -3749,17 +3794,17 @@ void __init kmem_cache_init(void)
71248
71249 /* Caches that are not of the two-to-the-power-of size */
71250 if (KMALLOC_MIN_SIZE <= 32) {
71251- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
71252+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
71253 caches++;
71254 }
71255
71256 if (KMALLOC_MIN_SIZE <= 64) {
71257- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
71258+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
71259 caches++;
71260 }
71261
71262 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71263- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
71264+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
71265 caches++;
71266 }
71267
71268@@ -3827,7 +3872,7 @@ static int slab_unmergeable(struct kmem_cache *s)
71269 /*
71270 * We may have set a slab to be unmergeable during bootstrap.
71271 */
71272- if (s->refcount < 0)
71273+ if (atomic_read(&s->refcount) < 0)
71274 return 1;
71275
71276 return 0;
71277@@ -3886,7 +3931,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71278 down_write(&slub_lock);
71279 s = find_mergeable(size, align, flags, name, ctor);
71280 if (s) {
71281- s->refcount++;
71282+ atomic_inc(&s->refcount);
71283 /*
71284 * Adjust the object sizes so that we clear
71285 * the complete object on kzalloc.
71286@@ -3895,7 +3940,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
71287 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
71288
71289 if (sysfs_slab_alias(s, name)) {
71290- s->refcount--;
71291+ atomic_dec(&s->refcount);
71292 goto err;
71293 }
71294 up_write(&slub_lock);
71295@@ -4023,7 +4068,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
71296 }
71297 #endif
71298
71299-#ifdef CONFIG_SYSFS
71300+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71301 static int count_inuse(struct page *page)
71302 {
71303 return page->inuse;
71304@@ -4410,12 +4455,12 @@ static void resiliency_test(void)
71305 validate_slab_cache(kmalloc_caches[9]);
71306 }
71307 #else
71308-#ifdef CONFIG_SYSFS
71309+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71310 static void resiliency_test(void) {};
71311 #endif
71312 #endif
71313
71314-#ifdef CONFIG_SYSFS
71315+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71316 enum slab_stat_type {
71317 SL_ALL, /* All slabs */
71318 SL_PARTIAL, /* Only partially allocated slabs */
71319@@ -4656,7 +4701,7 @@ SLAB_ATTR_RO(ctor);
71320
71321 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71322 {
71323- return sprintf(buf, "%d\n", s->refcount - 1);
71324+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71325 }
71326 SLAB_ATTR_RO(aliases);
71327
71328@@ -5223,6 +5268,7 @@ static char *create_unique_id(struct kmem_cache *s)
71329 return name;
71330 }
71331
71332+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71333 static int sysfs_slab_add(struct kmem_cache *s)
71334 {
71335 int err;
71336@@ -5285,6 +5331,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
71337 kobject_del(&s->kobj);
71338 kobject_put(&s->kobj);
71339 }
71340+#endif
71341
71342 /*
71343 * Need to buffer aliases during bootup until sysfs becomes
71344@@ -5298,6 +5345,7 @@ struct saved_alias {
71345
71346 static struct saved_alias *alias_list;
71347
71348+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
71349 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71350 {
71351 struct saved_alias *al;
71352@@ -5320,6 +5368,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
71353 alias_list = al;
71354 return 0;
71355 }
71356+#endif
71357
71358 static int __init slab_sysfs_init(void)
71359 {
71360diff --git a/mm/swap.c b/mm/swap.c
71361index a91caf7..b887e735 100644
71362--- a/mm/swap.c
71363+++ b/mm/swap.c
71364@@ -31,6 +31,7 @@
71365 #include <linux/backing-dev.h>
71366 #include <linux/memcontrol.h>
71367 #include <linux/gfp.h>
71368+#include <linux/hugetlb.h>
71369
71370 #include "internal.h"
71371
71372@@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
71373
71374 __page_cache_release(page);
71375 dtor = get_compound_page_dtor(page);
71376+ if (!PageHuge(page))
71377+ BUG_ON(dtor != free_compound_page);
71378 (*dtor)(page);
71379 }
71380
71381diff --git a/mm/swapfile.c b/mm/swapfile.c
71382index b1cd120..aaae885 100644
71383--- a/mm/swapfile.c
71384+++ b/mm/swapfile.c
71385@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
71386
71387 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
71388 /* Activity counter to indicate that a swapon or swapoff has occurred */
71389-static atomic_t proc_poll_event = ATOMIC_INIT(0);
71390+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
71391
71392 static inline unsigned char swap_count(unsigned char ent)
71393 {
71394@@ -1670,7 +1670,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
71395 }
71396 filp_close(swap_file, NULL);
71397 err = 0;
71398- atomic_inc(&proc_poll_event);
71399+ atomic_inc_unchecked(&proc_poll_event);
71400 wake_up_interruptible(&proc_poll_wait);
71401
71402 out_dput:
71403@@ -1686,8 +1686,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
71404
71405 poll_wait(file, &proc_poll_wait, wait);
71406
71407- if (seq->poll_event != atomic_read(&proc_poll_event)) {
71408- seq->poll_event = atomic_read(&proc_poll_event);
71409+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
71410+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71411 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
71412 }
71413
71414@@ -1785,7 +1785,7 @@ static int swaps_open(struct inode *inode, struct file *file)
71415 return ret;
71416
71417 seq = file->private_data;
71418- seq->poll_event = atomic_read(&proc_poll_event);
71419+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
71420 return 0;
71421 }
71422
71423@@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
71424 (p->flags & SWP_DISCARDABLE) ? "D" : "");
71425
71426 mutex_unlock(&swapon_mutex);
71427- atomic_inc(&proc_poll_event);
71428+ atomic_inc_unchecked(&proc_poll_event);
71429 wake_up_interruptible(&proc_poll_wait);
71430
71431 if (S_ISREG(inode->i_mode))
71432diff --git a/mm/util.c b/mm/util.c
71433index 136ac4f..5117eef 100644
71434--- a/mm/util.c
71435+++ b/mm/util.c
71436@@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
71437 * allocated buffer. Use this if you don't want to free the buffer immediately
71438 * like, for example, with RCU.
71439 */
71440+#undef __krealloc
71441 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
71442 {
71443 void *ret;
71444@@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
71445 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
71446 * %NULL pointer, the object pointed to is freed.
71447 */
71448+#undef krealloc
71449 void *krealloc(const void *p, size_t new_size, gfp_t flags)
71450 {
71451 void *ret;
71452@@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
71453 void arch_pick_mmap_layout(struct mm_struct *mm)
71454 {
71455 mm->mmap_base = TASK_UNMAPPED_BASE;
71456+
71457+#ifdef CONFIG_PAX_RANDMMAP
71458+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71459+ mm->mmap_base += mm->delta_mmap;
71460+#endif
71461+
71462 mm->get_unmapped_area = arch_get_unmapped_area;
71463 mm->unmap_area = arch_unmap_area;
71464 }
71465diff --git a/mm/vmalloc.c b/mm/vmalloc.c
71466index 27be2f0..0aef2c2 100644
71467--- a/mm/vmalloc.c
71468+++ b/mm/vmalloc.c
71469@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
71470
71471 pte = pte_offset_kernel(pmd, addr);
71472 do {
71473- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71474- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71475+
71476+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71477+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71478+ BUG_ON(!pte_exec(*pte));
71479+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71480+ continue;
71481+ }
71482+#endif
71483+
71484+ {
71485+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71486+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71487+ }
71488 } while (pte++, addr += PAGE_SIZE, addr != end);
71489 }
71490
71491@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71492 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71493 {
71494 pte_t *pte;
71495+ int ret = -ENOMEM;
71496
71497 /*
71498 * nr is a running index into the array which helps higher level
71499@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
71500 pte = pte_alloc_kernel(pmd, addr);
71501 if (!pte)
71502 return -ENOMEM;
71503+
71504+ pax_open_kernel();
71505 do {
71506 struct page *page = pages[*nr];
71507
71508- if (WARN_ON(!pte_none(*pte)))
71509- return -EBUSY;
71510- if (WARN_ON(!page))
71511- return -ENOMEM;
71512+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71513+ if (pgprot_val(prot) & _PAGE_NX)
71514+#endif
71515+
71516+ if (WARN_ON(!pte_none(*pte))) {
71517+ ret = -EBUSY;
71518+ goto out;
71519+ }
71520+ if (WARN_ON(!page)) {
71521+ ret = -ENOMEM;
71522+ goto out;
71523+ }
71524 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71525 (*nr)++;
71526 } while (pte++, addr += PAGE_SIZE, addr != end);
71527- return 0;
71528+ ret = 0;
71529+out:
71530+ pax_close_kernel();
71531+ return ret;
71532 }
71533
71534 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71535@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
71536 * and fall back on vmalloc() if that fails. Others
71537 * just put it in the vmalloc space.
71538 */
71539-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71540+#ifdef CONFIG_MODULES
71541+#ifdef MODULES_VADDR
71542 unsigned long addr = (unsigned long)x;
71543 if (addr >= MODULES_VADDR && addr < MODULES_END)
71544 return 1;
71545 #endif
71546+
71547+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71548+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71549+ return 1;
71550+#endif
71551+
71552+#endif
71553+
71554 return is_vmalloc_addr(x);
71555 }
71556
71557@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
71558
71559 if (!pgd_none(*pgd)) {
71560 pud_t *pud = pud_offset(pgd, addr);
71561+#ifdef CONFIG_X86
71562+ if (!pud_large(*pud))
71563+#endif
71564 if (!pud_none(*pud)) {
71565 pmd_t *pmd = pmd_offset(pud, addr);
71566+#ifdef CONFIG_X86
71567+ if (!pmd_large(*pmd))
71568+#endif
71569 if (!pmd_none(*pmd)) {
71570 pte_t *ptep, pte;
71571
71572@@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
71573 struct vm_struct *area;
71574
71575 BUG_ON(in_interrupt());
71576+
71577+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71578+ if (flags & VM_KERNEXEC) {
71579+ if (start != VMALLOC_START || end != VMALLOC_END)
71580+ return NULL;
71581+ start = (unsigned long)MODULES_EXEC_VADDR;
71582+ end = (unsigned long)MODULES_EXEC_END;
71583+ }
71584+#endif
71585+
71586 if (flags & VM_IOREMAP) {
71587 int bit = fls(size);
71588
71589@@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
71590 if (count > totalram_pages)
71591 return NULL;
71592
71593+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71594+ if (!(pgprot_val(prot) & _PAGE_NX))
71595+ flags |= VM_KERNEXEC;
71596+#endif
71597+
71598 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71599 __builtin_return_address(0));
71600 if (!area)
71601@@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
71602 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71603 goto fail;
71604
71605+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71606+ if (!(pgprot_val(prot) & _PAGE_NX))
71607+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
71608+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
71609+ else
71610+#endif
71611+
71612 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
71613 start, end, node, gfp_mask, caller);
71614 if (!area)
71615@@ -1679,6 +1741,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
71616 gfp_mask, prot, node, caller);
71617 }
71618
71619+#undef __vmalloc
71620 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71621 {
71622 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71623@@ -1702,6 +1765,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
71624 * For tight control over page level allocator and protection flags
71625 * use __vmalloc() instead.
71626 */
71627+#undef vmalloc
71628 void *vmalloc(unsigned long size)
71629 {
71630 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
71631@@ -1718,6 +1782,7 @@ EXPORT_SYMBOL(vmalloc);
71632 * For tight control over page level allocator and protection flags
71633 * use __vmalloc() instead.
71634 */
71635+#undef vzalloc
71636 void *vzalloc(unsigned long size)
71637 {
71638 return __vmalloc_node_flags(size, -1,
71639@@ -1732,6 +1797,7 @@ EXPORT_SYMBOL(vzalloc);
71640 * The resulting memory area is zeroed so it can be mapped to userspace
71641 * without leaking data.
71642 */
71643+#undef vmalloc_user
71644 void *vmalloc_user(unsigned long size)
71645 {
71646 struct vm_struct *area;
71647@@ -1759,6 +1825,7 @@ EXPORT_SYMBOL(vmalloc_user);
71648 * For tight control over page level allocator and protection flags
71649 * use __vmalloc() instead.
71650 */
71651+#undef vmalloc_node
71652 void *vmalloc_node(unsigned long size, int node)
71653 {
71654 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71655@@ -1778,6 +1845,7 @@ EXPORT_SYMBOL(vmalloc_node);
71656 * For tight control over page level allocator and protection flags
71657 * use __vmalloc_node() instead.
71658 */
71659+#undef vzalloc_node
71660 void *vzalloc_node(unsigned long size, int node)
71661 {
71662 return __vmalloc_node_flags(size, node,
71663@@ -1800,10 +1868,10 @@ EXPORT_SYMBOL(vzalloc_node);
71664 * For tight control over page level allocator and protection flags
71665 * use __vmalloc() instead.
71666 */
71667-
71668+#undef vmalloc_exec
71669 void *vmalloc_exec(unsigned long size)
71670 {
71671- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71672+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71673 -1, __builtin_return_address(0));
71674 }
71675
71676@@ -1822,6 +1890,7 @@ void *vmalloc_exec(unsigned long size)
71677 * Allocate enough 32bit PA addressable pages to cover @size from the
71678 * page level allocator and map them into contiguous kernel virtual space.
71679 */
71680+#undef vmalloc_32
71681 void *vmalloc_32(unsigned long size)
71682 {
71683 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71684@@ -1836,6 +1905,7 @@ EXPORT_SYMBOL(vmalloc_32);
71685 * The resulting memory area is 32bit addressable and zeroed so it can be
71686 * mapped to userspace without leaking data.
71687 */
71688+#undef vmalloc_32_user
71689 void *vmalloc_32_user(unsigned long size)
71690 {
71691 struct vm_struct *area;
71692@@ -2098,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
71693 unsigned long uaddr = vma->vm_start;
71694 unsigned long usize = vma->vm_end - vma->vm_start;
71695
71696+ BUG_ON(vma->vm_mirror);
71697+
71698 if ((PAGE_SIZE-1) & (unsigned long)addr)
71699 return -EINVAL;
71700
71701diff --git a/mm/vmstat.c b/mm/vmstat.c
71702index 8fd603b..cf0d930 100644
71703--- a/mm/vmstat.c
71704+++ b/mm/vmstat.c
71705@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
71706 *
71707 * vm_stat contains the global counters
71708 */
71709-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71710+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
71711 EXPORT_SYMBOL(vm_stat);
71712
71713 #ifdef CONFIG_SMP
71714@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
71715 v = p->vm_stat_diff[i];
71716 p->vm_stat_diff[i] = 0;
71717 local_irq_restore(flags);
71718- atomic_long_add(v, &zone->vm_stat[i]);
71719+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71720 global_diff[i] += v;
71721 #ifdef CONFIG_NUMA
71722 /* 3 seconds idle till flush */
71723@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
71724
71725 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71726 if (global_diff[i])
71727- atomic_long_add(global_diff[i], &vm_stat[i]);
71728+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71729 }
71730
71731 #endif
71732@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
71733 start_cpu_timer(cpu);
71734 #endif
71735 #ifdef CONFIG_PROC_FS
71736- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71737- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71738- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71739- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71740+ {
71741+ mode_t gr_mode = S_IRUGO;
71742+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71743+ gr_mode = S_IRUSR;
71744+#endif
71745+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71746+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71747+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71748+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71749+#else
71750+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71751+#endif
71752+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71753+ }
71754 #endif
71755 return 0;
71756 }
71757diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
71758index 5471628..cef8398 100644
71759--- a/net/8021q/vlan.c
71760+++ b/net/8021q/vlan.c
71761@@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
71762 err = -EPERM;
71763 if (!capable(CAP_NET_ADMIN))
71764 break;
71765- if ((args.u.name_type >= 0) &&
71766- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71767+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71768 struct vlan_net *vn;
71769
71770 vn = net_generic(net, vlan_net_id);
71771diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
71772index fdfdb57..38d368c 100644
71773--- a/net/9p/trans_fd.c
71774+++ b/net/9p/trans_fd.c
71775@@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
71776 oldfs = get_fs();
71777 set_fs(get_ds());
71778 /* The cast to a user pointer is valid due to the set_fs() */
71779- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
71780+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
71781 set_fs(oldfs);
71782
71783 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
71784diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
71785index f41f026..fe76ea8 100644
71786--- a/net/atm/atm_misc.c
71787+++ b/net/atm/atm_misc.c
71788@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
71789 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71790 return 1;
71791 atm_return(vcc, truesize);
71792- atomic_inc(&vcc->stats->rx_drop);
71793+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71794 return 0;
71795 }
71796 EXPORT_SYMBOL(atm_charge);
71797@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
71798 }
71799 }
71800 atm_return(vcc, guess);
71801- atomic_inc(&vcc->stats->rx_drop);
71802+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71803 return NULL;
71804 }
71805 EXPORT_SYMBOL(atm_alloc_charge);
71806@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
71807
71808 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71809 {
71810-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71811+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71812 __SONET_ITEMS
71813 #undef __HANDLE_ITEM
71814 }
71815@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
71816
71817 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
71818 {
71819-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71820+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71821 __SONET_ITEMS
71822 #undef __HANDLE_ITEM
71823 }
71824diff --git a/net/atm/lec.h b/net/atm/lec.h
71825index dfc0719..47c5322 100644
71826--- a/net/atm/lec.h
71827+++ b/net/atm/lec.h
71828@@ -48,7 +48,7 @@ struct lane2_ops {
71829 const u8 *tlvs, u32 sizeoftlvs);
71830 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71831 const u8 *tlvs, u32 sizeoftlvs);
71832-};
71833+} __no_const;
71834
71835 /*
71836 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71837diff --git a/net/atm/mpc.h b/net/atm/mpc.h
71838index 0919a88..a23d54e 100644
71839--- a/net/atm/mpc.h
71840+++ b/net/atm/mpc.h
71841@@ -33,7 +33,7 @@ struct mpoa_client {
71842 struct mpc_parameters parameters; /* parameters for this client */
71843
71844 const struct net_device_ops *old_ops;
71845- struct net_device_ops new_ops;
71846+ net_device_ops_no_const new_ops;
71847 };
71848
71849
71850diff --git a/net/atm/proc.c b/net/atm/proc.c
71851index 0d020de..011c7bb 100644
71852--- a/net/atm/proc.c
71853+++ b/net/atm/proc.c
71854@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
71855 const struct k_atm_aal_stats *stats)
71856 {
71857 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71858- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
71859- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
71860- atomic_read(&stats->rx_drop));
71861+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71862+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71863+ atomic_read_unchecked(&stats->rx_drop));
71864 }
71865
71866 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71867diff --git a/net/atm/resources.c b/net/atm/resources.c
71868index 23f45ce..c748f1a 100644
71869--- a/net/atm/resources.c
71870+++ b/net/atm/resources.c
71871@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
71872 static void copy_aal_stats(struct k_atm_aal_stats *from,
71873 struct atm_aal_stats *to)
71874 {
71875-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71876+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71877 __AAL_STAT_ITEMS
71878 #undef __HANDLE_ITEM
71879 }
71880@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
71881 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71882 struct atm_aal_stats *to)
71883 {
71884-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71885+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71886 __AAL_STAT_ITEMS
71887 #undef __HANDLE_ITEM
71888 }
71889diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
71890index 3512e25..2b33401 100644
71891--- a/net/batman-adv/bat_iv_ogm.c
71892+++ b/net/batman-adv/bat_iv_ogm.c
71893@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71894
71895 /* change sequence number to network order */
71896 batman_ogm_packet->seqno =
71897- htonl((uint32_t)atomic_read(&hard_iface->seqno));
71898+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
71899
71900 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
71901 batman_ogm_packet->tt_crc = htons((uint16_t)
71902@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
71903 else
71904 batman_ogm_packet->gw_flags = NO_FLAGS;
71905
71906- atomic_inc(&hard_iface->seqno);
71907+ atomic_inc_unchecked(&hard_iface->seqno);
71908
71909 slide_own_bcast_window(hard_iface);
71910 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
71911@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
71912 return;
71913
71914 /* could be changed by schedule_own_packet() */
71915- if_incoming_seqno = atomic_read(&if_incoming->seqno);
71916+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
71917
71918 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
71919
71920diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
71921index 7704df4..beb4e16 100644
71922--- a/net/batman-adv/hard-interface.c
71923+++ b/net/batman-adv/hard-interface.c
71924@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
71925 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
71926 dev_add_pack(&hard_iface->batman_adv_ptype);
71927
71928- atomic_set(&hard_iface->seqno, 1);
71929- atomic_set(&hard_iface->frag_seqno, 1);
71930+ atomic_set_unchecked(&hard_iface->seqno, 1);
71931+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
71932 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
71933 hard_iface->net_dev->name);
71934
71935diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
71936index f9cc957..efd9dae 100644
71937--- a/net/batman-adv/soft-interface.c
71938+++ b/net/batman-adv/soft-interface.c
71939@@ -634,7 +634,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
71940
71941 /* set broadcast sequence number */
71942 bcast_packet->seqno =
71943- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
71944+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
71945
71946 add_bcast_packet_to_list(bat_priv, skb, 1);
71947
71948@@ -828,7 +828,7 @@ struct net_device *softif_create(const char *name)
71949 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
71950
71951 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
71952- atomic_set(&bat_priv->bcast_seqno, 1);
71953+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
71954 atomic_set(&bat_priv->ttvn, 0);
71955 atomic_set(&bat_priv->tt_local_changes, 0);
71956 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
71957diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
71958index ab8d0fe..ceba3fd 100644
71959--- a/net/batman-adv/types.h
71960+++ b/net/batman-adv/types.h
71961@@ -38,8 +38,8 @@ struct hard_iface {
71962 int16_t if_num;
71963 char if_status;
71964 struct net_device *net_dev;
71965- atomic_t seqno;
71966- atomic_t frag_seqno;
71967+ atomic_unchecked_t seqno;
71968+ atomic_unchecked_t frag_seqno;
71969 unsigned char *packet_buff;
71970 int packet_len;
71971 struct kobject *hardif_obj;
71972@@ -154,7 +154,7 @@ struct bat_priv {
71973 atomic_t orig_interval; /* uint */
71974 atomic_t hop_penalty; /* uint */
71975 atomic_t log_level; /* uint */
71976- atomic_t bcast_seqno;
71977+ atomic_unchecked_t bcast_seqno;
71978 atomic_t bcast_queue_left;
71979 atomic_t batman_queue_left;
71980 atomic_t ttvn; /* translation table version number */
71981diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
71982index 07d1c1d..7e9bea9 100644
71983--- a/net/batman-adv/unicast.c
71984+++ b/net/batman-adv/unicast.c
71985@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
71986 frag1->flags = UNI_FRAG_HEAD | large_tail;
71987 frag2->flags = large_tail;
71988
71989- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
71990+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
71991 frag1->seqno = htons(seqno - 1);
71992 frag2->seqno = htons(seqno);
71993
71994diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
71995index c1c597e..05ebb40 100644
71996--- a/net/bluetooth/hci_conn.c
71997+++ b/net/bluetooth/hci_conn.c
71998@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
71999 memset(&cp, 0, sizeof(cp));
72000
72001 cp.handle = cpu_to_le16(conn->handle);
72002- memcpy(cp.ltk, ltk, sizeof(ltk));
72003+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
72004
72005 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
72006 }
72007diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
72008index 17b5b1c..826d872 100644
72009--- a/net/bluetooth/l2cap_core.c
72010+++ b/net/bluetooth/l2cap_core.c
72011@@ -2176,8 +2176,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
72012 break;
72013
72014 case L2CAP_CONF_RFC:
72015- if (olen == sizeof(rfc))
72016- memcpy(&rfc, (void *)val, olen);
72017+ if (olen != sizeof(rfc))
72018+ break;
72019+
72020+ memcpy(&rfc, (void *)val, olen);
72021
72022 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
72023 rfc.mode != chan->mode)
72024@@ -2265,8 +2267,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
72025
72026 switch (type) {
72027 case L2CAP_CONF_RFC:
72028- if (olen == sizeof(rfc))
72029- memcpy(&rfc, (void *)val, olen);
72030+ if (olen != sizeof(rfc))
72031+ break;
72032+
72033+ memcpy(&rfc, (void *)val, olen);
72034 goto done;
72035 }
72036 }
72037diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
72038index a5f4e57..910ee6d 100644
72039--- a/net/bridge/br_multicast.c
72040+++ b/net/bridge/br_multicast.c
72041@@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
72042 nexthdr = ip6h->nexthdr;
72043 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
72044
72045- if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
72046+ if (nexthdr != IPPROTO_ICMPV6)
72047 return 0;
72048
72049 /* Okay, we found ICMPv6 header */
72050diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
72051index 5864cc4..121f3a3 100644
72052--- a/net/bridge/netfilter/ebtables.c
72053+++ b/net/bridge/netfilter/ebtables.c
72054@@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
72055 tmp.valid_hooks = t->table->valid_hooks;
72056 }
72057 mutex_unlock(&ebt_mutex);
72058- if (copy_to_user(user, &tmp, *len) != 0){
72059+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
72060 BUGPRINT("c2u Didn't work\n");
72061 ret = -EFAULT;
72062 break;
72063diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
72064index a986280..13444a1 100644
72065--- a/net/caif/caif_socket.c
72066+++ b/net/caif/caif_socket.c
72067@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
72068 #ifdef CONFIG_DEBUG_FS
72069 struct debug_fs_counter {
72070 atomic_t caif_nr_socks;
72071- atomic_t caif_sock_create;
72072- atomic_t num_connect_req;
72073- atomic_t num_connect_resp;
72074- atomic_t num_connect_fail_resp;
72075- atomic_t num_disconnect;
72076- atomic_t num_remote_shutdown_ind;
72077- atomic_t num_tx_flow_off_ind;
72078- atomic_t num_tx_flow_on_ind;
72079- atomic_t num_rx_flow_off;
72080- atomic_t num_rx_flow_on;
72081+ atomic_unchecked_t caif_sock_create;
72082+ atomic_unchecked_t num_connect_req;
72083+ atomic_unchecked_t num_connect_resp;
72084+ atomic_unchecked_t num_connect_fail_resp;
72085+ atomic_unchecked_t num_disconnect;
72086+ atomic_unchecked_t num_remote_shutdown_ind;
72087+ atomic_unchecked_t num_tx_flow_off_ind;
72088+ atomic_unchecked_t num_tx_flow_on_ind;
72089+ atomic_unchecked_t num_rx_flow_off;
72090+ atomic_unchecked_t num_rx_flow_on;
72091 };
72092 static struct debug_fs_counter cnt;
72093 #define dbfs_atomic_inc(v) atomic_inc_return(v)
72094+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
72095 #define dbfs_atomic_dec(v) atomic_dec_return(v)
72096 #else
72097 #define dbfs_atomic_inc(v) 0
72098@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72099 atomic_read(&cf_sk->sk.sk_rmem_alloc),
72100 sk_rcvbuf_lowwater(cf_sk));
72101 set_rx_flow_off(cf_sk);
72102- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72103+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72104 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72105 }
72106
72107@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72108 set_rx_flow_off(cf_sk);
72109 if (net_ratelimit())
72110 pr_debug("sending flow OFF due to rmem_schedule\n");
72111- dbfs_atomic_inc(&cnt.num_rx_flow_off);
72112+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
72113 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
72114 }
72115 skb->dev = NULL;
72116@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
72117 switch (flow) {
72118 case CAIF_CTRLCMD_FLOW_ON_IND:
72119 /* OK from modem to start sending again */
72120- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
72121+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
72122 set_tx_flow_on(cf_sk);
72123 cf_sk->sk.sk_state_change(&cf_sk->sk);
72124 break;
72125
72126 case CAIF_CTRLCMD_FLOW_OFF_IND:
72127 /* Modem asks us to shut up */
72128- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
72129+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
72130 set_tx_flow_off(cf_sk);
72131 cf_sk->sk.sk_state_change(&cf_sk->sk);
72132 break;
72133@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72134 /* We're now connected */
72135 caif_client_register_refcnt(&cf_sk->layer,
72136 cfsk_hold, cfsk_put);
72137- dbfs_atomic_inc(&cnt.num_connect_resp);
72138+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
72139 cf_sk->sk.sk_state = CAIF_CONNECTED;
72140 set_tx_flow_on(cf_sk);
72141 cf_sk->sk.sk_state_change(&cf_sk->sk);
72142@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72143
72144 case CAIF_CTRLCMD_INIT_FAIL_RSP:
72145 /* Connect request failed */
72146- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
72147+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
72148 cf_sk->sk.sk_err = ECONNREFUSED;
72149 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
72150 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72151@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
72152
72153 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
72154 /* Modem has closed this connection, or device is down. */
72155- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
72156+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
72157 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
72158 cf_sk->sk.sk_err = ECONNRESET;
72159 set_rx_flow_on(cf_sk);
72160@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
72161 return;
72162
72163 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
72164- dbfs_atomic_inc(&cnt.num_rx_flow_on);
72165+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
72166 set_rx_flow_on(cf_sk);
72167 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
72168 }
72169@@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
72170 /*ifindex = id of the interface.*/
72171 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
72172
72173- dbfs_atomic_inc(&cnt.num_connect_req);
72174+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
72175 cf_sk->layer.receive = caif_sktrecv_cb;
72176
72177 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
72178@@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
72179 spin_unlock_bh(&sk->sk_receive_queue.lock);
72180 sock->sk = NULL;
72181
72182- dbfs_atomic_inc(&cnt.num_disconnect);
72183+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
72184
72185 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
72186 if (cf_sk->debugfs_socket_dir != NULL)
72187@@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
72188 cf_sk->conn_req.protocol = protocol;
72189 /* Increase the number of sockets created. */
72190 dbfs_atomic_inc(&cnt.caif_nr_socks);
72191- num = dbfs_atomic_inc(&cnt.caif_sock_create);
72192+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
72193 #ifdef CONFIG_DEBUG_FS
72194 if (!IS_ERR(debugfsdir)) {
72195
72196diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
72197index 5cf5222..6f704ad 100644
72198--- a/net/caif/cfctrl.c
72199+++ b/net/caif/cfctrl.c
72200@@ -9,6 +9,7 @@
72201 #include <linux/stddef.h>
72202 #include <linux/spinlock.h>
72203 #include <linux/slab.h>
72204+#include <linux/sched.h>
72205 #include <net/caif/caif_layer.h>
72206 #include <net/caif/cfpkt.h>
72207 #include <net/caif/cfctrl.h>
72208@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
72209 memset(&dev_info, 0, sizeof(dev_info));
72210 dev_info.id = 0xff;
72211 cfsrvl_init(&this->serv, 0, &dev_info, false);
72212- atomic_set(&this->req_seq_no, 1);
72213- atomic_set(&this->rsp_seq_no, 1);
72214+ atomic_set_unchecked(&this->req_seq_no, 1);
72215+ atomic_set_unchecked(&this->rsp_seq_no, 1);
72216 this->serv.layer.receive = cfctrl_recv;
72217 sprintf(this->serv.layer.name, "ctrl");
72218 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
72219@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
72220 struct cfctrl_request_info *req)
72221 {
72222 spin_lock_bh(&ctrl->info_list_lock);
72223- atomic_inc(&ctrl->req_seq_no);
72224- req->sequence_no = atomic_read(&ctrl->req_seq_no);
72225+ atomic_inc_unchecked(&ctrl->req_seq_no);
72226+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
72227 list_add_tail(&req->list, &ctrl->list);
72228 spin_unlock_bh(&ctrl->info_list_lock);
72229 }
72230@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
72231 if (p != first)
72232 pr_warn("Requests are not received in order\n");
72233
72234- atomic_set(&ctrl->rsp_seq_no,
72235+ atomic_set_unchecked(&ctrl->rsp_seq_no,
72236 p->sequence_no);
72237 list_del(&p->list);
72238 goto out;
72239diff --git a/net/can/gw.c b/net/can/gw.c
72240index 3d79b12..8de85fa 100644
72241--- a/net/can/gw.c
72242+++ b/net/can/gw.c
72243@@ -96,7 +96,7 @@ struct cf_mod {
72244 struct {
72245 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
72246 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
72247- } csumfunc;
72248+ } __no_const csumfunc;
72249 };
72250
72251
72252diff --git a/net/compat.c b/net/compat.c
72253index 6def90e..c6992fa 100644
72254--- a/net/compat.c
72255+++ b/net/compat.c
72256@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
72257 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
72258 __get_user(kmsg->msg_flags, &umsg->msg_flags))
72259 return -EFAULT;
72260- kmsg->msg_name = compat_ptr(tmp1);
72261- kmsg->msg_iov = compat_ptr(tmp2);
72262- kmsg->msg_control = compat_ptr(tmp3);
72263+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
72264+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
72265+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
72266 return 0;
72267 }
72268
72269@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72270
72271 if (kern_msg->msg_namelen) {
72272 if (mode == VERIFY_READ) {
72273- int err = move_addr_to_kernel(kern_msg->msg_name,
72274+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
72275 kern_msg->msg_namelen,
72276 kern_address);
72277 if (err < 0)
72278@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72279 kern_msg->msg_name = NULL;
72280
72281 tot_len = iov_from_user_compat_to_kern(kern_iov,
72282- (struct compat_iovec __user *)kern_msg->msg_iov,
72283+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
72284 kern_msg->msg_iovlen);
72285 if (tot_len >= 0)
72286 kern_msg->msg_iov = kern_iov;
72287@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
72288
72289 #define CMSG_COMPAT_FIRSTHDR(msg) \
72290 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
72291- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
72292+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
72293 (struct compat_cmsghdr __user *)NULL)
72294
72295 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
72296 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
72297 (ucmlen) <= (unsigned long) \
72298 ((mhdr)->msg_controllen - \
72299- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
72300+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
72301
72302 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
72303 struct compat_cmsghdr __user *cmsg, int cmsg_len)
72304 {
72305 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
72306- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
72307+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
72308 msg->msg_controllen)
72309 return NULL;
72310 return (struct compat_cmsghdr __user *)ptr;
72311@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72312 {
72313 struct compat_timeval ctv;
72314 struct compat_timespec cts[3];
72315- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72316+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72317 struct compat_cmsghdr cmhdr;
72318 int cmlen;
72319
72320@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
72321
72322 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
72323 {
72324- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
72325+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
72326 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
72327 int fdnum = scm->fp->count;
72328 struct file **fp = scm->fp->fp;
72329@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
72330 return -EFAULT;
72331 old_fs = get_fs();
72332 set_fs(KERNEL_DS);
72333- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
72334+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
72335 set_fs(old_fs);
72336
72337 return err;
72338@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
72339 len = sizeof(ktime);
72340 old_fs = get_fs();
72341 set_fs(KERNEL_DS);
72342- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
72343+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
72344 set_fs(old_fs);
72345
72346 if (!err) {
72347@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72348 case MCAST_JOIN_GROUP:
72349 case MCAST_LEAVE_GROUP:
72350 {
72351- struct compat_group_req __user *gr32 = (void *)optval;
72352+ struct compat_group_req __user *gr32 = (void __user *)optval;
72353 struct group_req __user *kgr =
72354 compat_alloc_user_space(sizeof(struct group_req));
72355 u32 interface;
72356@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72357 case MCAST_BLOCK_SOURCE:
72358 case MCAST_UNBLOCK_SOURCE:
72359 {
72360- struct compat_group_source_req __user *gsr32 = (void *)optval;
72361+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
72362 struct group_source_req __user *kgsr = compat_alloc_user_space(
72363 sizeof(struct group_source_req));
72364 u32 interface;
72365@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
72366 }
72367 case MCAST_MSFILTER:
72368 {
72369- struct compat_group_filter __user *gf32 = (void *)optval;
72370+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72371 struct group_filter __user *kgf;
72372 u32 interface, fmode, numsrc;
72373
72374@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
72375 char __user *optval, int __user *optlen,
72376 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
72377 {
72378- struct compat_group_filter __user *gf32 = (void *)optval;
72379+ struct compat_group_filter __user *gf32 = (void __user *)optval;
72380 struct group_filter __user *kgf;
72381 int __user *koptlen;
72382 u32 interface, fmode, numsrc;
72383diff --git a/net/core/datagram.c b/net/core/datagram.c
72384index 68bbf9f..5ef0d12 100644
72385--- a/net/core/datagram.c
72386+++ b/net/core/datagram.c
72387@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
72388 }
72389
72390 kfree_skb(skb);
72391- atomic_inc(&sk->sk_drops);
72392+ atomic_inc_unchecked(&sk->sk_drops);
72393 sk_mem_reclaim_partial(sk);
72394
72395 return err;
72396diff --git a/net/core/dev.c b/net/core/dev.c
72397index 5a13edf..a6f2bd2 100644
72398--- a/net/core/dev.c
72399+++ b/net/core/dev.c
72400@@ -1139,10 +1139,14 @@ void dev_load(struct net *net, const char *name)
72401 if (no_module && capable(CAP_NET_ADMIN))
72402 no_module = request_module("netdev-%s", name);
72403 if (no_module && capable(CAP_SYS_MODULE)) {
72404+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72405+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
72406+#else
72407 if (!request_module("%s", name))
72408 pr_err("Loading kernel module for a network device "
72409 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
72410 "instead\n", name);
72411+#endif
72412 }
72413 }
72414 EXPORT_SYMBOL(dev_load);
72415@@ -1573,7 +1577,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72416 {
72417 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
72418 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
72419- atomic_long_inc(&dev->rx_dropped);
72420+ atomic_long_inc_unchecked(&dev->rx_dropped);
72421 kfree_skb(skb);
72422 return NET_RX_DROP;
72423 }
72424@@ -1583,7 +1587,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
72425 nf_reset(skb);
72426
72427 if (unlikely(!is_skb_forwardable(dev, skb))) {
72428- atomic_long_inc(&dev->rx_dropped);
72429+ atomic_long_inc_unchecked(&dev->rx_dropped);
72430 kfree_skb(skb);
72431 return NET_RX_DROP;
72432 }
72433@@ -2036,7 +2040,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
72434
72435 struct dev_gso_cb {
72436 void (*destructor)(struct sk_buff *skb);
72437-};
72438+} __no_const;
72439
72440 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
72441
72442@@ -2970,7 +2974,7 @@ enqueue:
72443
72444 local_irq_restore(flags);
72445
72446- atomic_long_inc(&skb->dev->rx_dropped);
72447+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72448 kfree_skb(skb);
72449 return NET_RX_DROP;
72450 }
72451@@ -3044,7 +3048,7 @@ int netif_rx_ni(struct sk_buff *skb)
72452 }
72453 EXPORT_SYMBOL(netif_rx_ni);
72454
72455-static void net_tx_action(struct softirq_action *h)
72456+static void net_tx_action(void)
72457 {
72458 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72459
72460@@ -3333,7 +3337,7 @@ ncls:
72461 if (pt_prev) {
72462 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
72463 } else {
72464- atomic_long_inc(&skb->dev->rx_dropped);
72465+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
72466 kfree_skb(skb);
72467 /* Jamal, now you will not able to escape explaining
72468 * me how you were going to use this. :-)
72469@@ -3891,7 +3895,7 @@ void netif_napi_del(struct napi_struct *napi)
72470 }
72471 EXPORT_SYMBOL(netif_napi_del);
72472
72473-static void net_rx_action(struct softirq_action *h)
72474+static void net_rx_action(void)
72475 {
72476 struct softnet_data *sd = &__get_cpu_var(softnet_data);
72477 unsigned long time_limit = jiffies + 2;
72478@@ -5949,7 +5953,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
72479 } else {
72480 netdev_stats_to_stats64(storage, &dev->stats);
72481 }
72482- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
72483+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
72484 return storage;
72485 }
72486 EXPORT_SYMBOL(dev_get_stats);
72487diff --git a/net/core/flow.c b/net/core/flow.c
72488index e318c7e..168b1d0 100644
72489--- a/net/core/flow.c
72490+++ b/net/core/flow.c
72491@@ -61,7 +61,7 @@ struct flow_cache {
72492 struct timer_list rnd_timer;
72493 };
72494
72495-atomic_t flow_cache_genid = ATOMIC_INIT(0);
72496+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
72497 EXPORT_SYMBOL(flow_cache_genid);
72498 static struct flow_cache flow_cache_global;
72499 static struct kmem_cache *flow_cachep __read_mostly;
72500@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
72501
72502 static int flow_entry_valid(struct flow_cache_entry *fle)
72503 {
72504- if (atomic_read(&flow_cache_genid) != fle->genid)
72505+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
72506 return 0;
72507 if (fle->object && !fle->object->ops->check(fle->object))
72508 return 0;
72509@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
72510 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
72511 fcp->hash_count++;
72512 }
72513- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
72514+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
72515 flo = fle->object;
72516 if (!flo)
72517 goto ret_object;
72518@@ -280,7 +280,7 @@ nocache:
72519 }
72520 flo = resolver(net, key, family, dir, flo, ctx);
72521 if (fle) {
72522- fle->genid = atomic_read(&flow_cache_genid);
72523+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
72524 if (!IS_ERR(flo))
72525 fle->object = flo;
72526 else
72527diff --git a/net/core/iovec.c b/net/core/iovec.c
72528index c40f27e..7f49254 100644
72529--- a/net/core/iovec.c
72530+++ b/net/core/iovec.c
72531@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72532 if (m->msg_namelen) {
72533 if (mode == VERIFY_READ) {
72534 void __user *namep;
72535- namep = (void __user __force *) m->msg_name;
72536+ namep = (void __force_user *) m->msg_name;
72537 err = move_addr_to_kernel(namep, m->msg_namelen,
72538 address);
72539 if (err < 0)
72540@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
72541 }
72542
72543 size = m->msg_iovlen * sizeof(struct iovec);
72544- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
72545+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
72546 return -EFAULT;
72547
72548 m->msg_iov = iov;
72549diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
72550index 9083e82..1673203 100644
72551--- a/net/core/rtnetlink.c
72552+++ b/net/core/rtnetlink.c
72553@@ -57,7 +57,7 @@ struct rtnl_link {
72554 rtnl_doit_func doit;
72555 rtnl_dumpit_func dumpit;
72556 rtnl_calcit_func calcit;
72557-};
72558+} __no_const;
72559
72560 static DEFINE_MUTEX(rtnl_mutex);
72561 static u16 min_ifinfo_dump_size;
72562diff --git a/net/core/scm.c b/net/core/scm.c
72563index ff52ad0..aff1c0f 100644
72564--- a/net/core/scm.c
72565+++ b/net/core/scm.c
72566@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
72567 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72568 {
72569 struct cmsghdr __user *cm
72570- = (__force struct cmsghdr __user *)msg->msg_control;
72571+ = (struct cmsghdr __force_user *)msg->msg_control;
72572 struct cmsghdr cmhdr;
72573 int cmlen = CMSG_LEN(len);
72574 int err;
72575@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
72576 err = -EFAULT;
72577 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
72578 goto out;
72579- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
72580+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
72581 goto out;
72582 cmlen = CMSG_SPACE(len);
72583 if (msg->msg_controllen < cmlen)
72584@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
72585 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72586 {
72587 struct cmsghdr __user *cm
72588- = (__force struct cmsghdr __user*)msg->msg_control;
72589+ = (struct cmsghdr __force_user *)msg->msg_control;
72590
72591 int fdmax = 0;
72592 int fdnum = scm->fp->count;
72593@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
72594 if (fdnum < fdmax)
72595 fdmax = fdnum;
72596
72597- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
72598+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
72599 i++, cmfptr++)
72600 {
72601 int new_fd;
72602diff --git a/net/core/sock.c b/net/core/sock.c
72603index b23f174..b9a0d26 100644
72604--- a/net/core/sock.c
72605+++ b/net/core/sock.c
72606@@ -289,7 +289,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72607 struct sk_buff_head *list = &sk->sk_receive_queue;
72608
72609 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
72610- atomic_inc(&sk->sk_drops);
72611+ atomic_inc_unchecked(&sk->sk_drops);
72612 trace_sock_rcvqueue_full(sk, skb);
72613 return -ENOMEM;
72614 }
72615@@ -299,7 +299,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72616 return err;
72617
72618 if (!sk_rmem_schedule(sk, skb->truesize)) {
72619- atomic_inc(&sk->sk_drops);
72620+ atomic_inc_unchecked(&sk->sk_drops);
72621 return -ENOBUFS;
72622 }
72623
72624@@ -319,7 +319,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
72625 skb_dst_force(skb);
72626
72627 spin_lock_irqsave(&list->lock, flags);
72628- skb->dropcount = atomic_read(&sk->sk_drops);
72629+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
72630 __skb_queue_tail(list, skb);
72631 spin_unlock_irqrestore(&list->lock, flags);
72632
72633@@ -339,7 +339,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72634 skb->dev = NULL;
72635
72636 if (sk_rcvqueues_full(sk, skb)) {
72637- atomic_inc(&sk->sk_drops);
72638+ atomic_inc_unchecked(&sk->sk_drops);
72639 goto discard_and_relse;
72640 }
72641 if (nested)
72642@@ -357,7 +357,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
72643 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
72644 } else if (sk_add_backlog(sk, skb)) {
72645 bh_unlock_sock(sk);
72646- atomic_inc(&sk->sk_drops);
72647+ atomic_inc_unchecked(&sk->sk_drops);
72648 goto discard_and_relse;
72649 }
72650
72651@@ -917,7 +917,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72652 if (len > sizeof(peercred))
72653 len = sizeof(peercred);
72654 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
72655- if (copy_to_user(optval, &peercred, len))
72656+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
72657 return -EFAULT;
72658 goto lenout;
72659 }
72660@@ -930,7 +930,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72661 return -ENOTCONN;
72662 if (lv < len)
72663 return -EINVAL;
72664- if (copy_to_user(optval, address, len))
72665+ if (len > sizeof(address) || copy_to_user(optval, address, len))
72666 return -EFAULT;
72667 goto lenout;
72668 }
72669@@ -963,7 +963,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
72670
72671 if (len > lv)
72672 len = lv;
72673- if (copy_to_user(optval, &v, len))
72674+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
72675 return -EFAULT;
72676 lenout:
72677 if (put_user(len, optlen))
72678@@ -2020,7 +2020,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
72679 */
72680 smp_wmb();
72681 atomic_set(&sk->sk_refcnt, 1);
72682- atomic_set(&sk->sk_drops, 0);
72683+ atomic_set_unchecked(&sk->sk_drops, 0);
72684 }
72685 EXPORT_SYMBOL(sock_init_data);
72686
72687diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
72688index 02e75d1..9a57a7c 100644
72689--- a/net/decnet/sysctl_net_decnet.c
72690+++ b/net/decnet/sysctl_net_decnet.c
72691@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
72692
72693 if (len > *lenp) len = *lenp;
72694
72695- if (copy_to_user(buffer, addr, len))
72696+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72697 return -EFAULT;
72698
72699 *lenp = len;
72700@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
72701
72702 if (len > *lenp) len = *lenp;
72703
72704- if (copy_to_user(buffer, devname, len))
72705+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72706 return -EFAULT;
72707
72708 *lenp = len;
72709diff --git a/net/econet/Kconfig b/net/econet/Kconfig
72710index 39a2d29..f39c0fe 100644
72711--- a/net/econet/Kconfig
72712+++ b/net/econet/Kconfig
72713@@ -4,7 +4,7 @@
72714
72715 config ECONET
72716 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72717- depends on EXPERIMENTAL && INET
72718+ depends on EXPERIMENTAL && INET && BROKEN
72719 ---help---
72720 Econet is a fairly old and slow networking protocol mainly used by
72721 Acorn computers to access file and print servers. It uses native
72722diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
72723index 92fc5f6..b790d91 100644
72724--- a/net/ipv4/fib_frontend.c
72725+++ b/net/ipv4/fib_frontend.c
72726@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
72727 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72728 fib_sync_up(dev);
72729 #endif
72730- atomic_inc(&net->ipv4.dev_addr_genid);
72731+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72732 rt_cache_flush(dev_net(dev), -1);
72733 break;
72734 case NETDEV_DOWN:
72735 fib_del_ifaddr(ifa, NULL);
72736- atomic_inc(&net->ipv4.dev_addr_genid);
72737+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72738 if (ifa->ifa_dev->ifa_list == NULL) {
72739 /* Last address was deleted from this interface.
72740 * Disable IP.
72741@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
72742 #ifdef CONFIG_IP_ROUTE_MULTIPATH
72743 fib_sync_up(dev);
72744 #endif
72745- atomic_inc(&net->ipv4.dev_addr_genid);
72746+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
72747 rt_cache_flush(dev_net(dev), -1);
72748 break;
72749 case NETDEV_DOWN:
72750diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
72751index 80106d8..232e898 100644
72752--- a/net/ipv4/fib_semantics.c
72753+++ b/net/ipv4/fib_semantics.c
72754@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
72755 nh->nh_saddr = inet_select_addr(nh->nh_dev,
72756 nh->nh_gw,
72757 nh->nh_parent->fib_scope);
72758- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
72759+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
72760
72761 return nh->nh_saddr;
72762 }
72763diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
72764index ccee270..db23c3c 100644
72765--- a/net/ipv4/inet_diag.c
72766+++ b/net/ipv4/inet_diag.c
72767@@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
72768 r->idiag_retrans = 0;
72769
72770 r->id.idiag_if = sk->sk_bound_dev_if;
72771+
72772+#ifdef CONFIG_GRKERNSEC_HIDESYM
72773+ r->id.idiag_cookie[0] = 0;
72774+ r->id.idiag_cookie[1] = 0;
72775+#else
72776 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72777 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72778+#endif
72779
72780 r->id.idiag_sport = inet->inet_sport;
72781 r->id.idiag_dport = inet->inet_dport;
72782@@ -210,8 +216,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
72783 r->idiag_family = tw->tw_family;
72784 r->idiag_retrans = 0;
72785 r->id.idiag_if = tw->tw_bound_dev_if;
72786+
72787+#ifdef CONFIG_GRKERNSEC_HIDESYM
72788+ r->id.idiag_cookie[0] = 0;
72789+ r->id.idiag_cookie[1] = 0;
72790+#else
72791 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72792 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72793+#endif
72794+
72795 r->id.idiag_sport = tw->tw_sport;
72796 r->id.idiag_dport = tw->tw_dport;
72797 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72798@@ -294,12 +307,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
72799 if (sk == NULL)
72800 goto unlock;
72801
72802+#ifndef CONFIG_GRKERNSEC_HIDESYM
72803 err = -ESTALE;
72804 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72805 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72806 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72807 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72808 goto out;
72809+#endif
72810
72811 err = -ENOMEM;
72812 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72813@@ -589,8 +604,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
72814 r->idiag_retrans = req->retrans;
72815
72816 r->id.idiag_if = sk->sk_bound_dev_if;
72817+
72818+#ifdef CONFIG_GRKERNSEC_HIDESYM
72819+ r->id.idiag_cookie[0] = 0;
72820+ r->id.idiag_cookie[1] = 0;
72821+#else
72822 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72823 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72824+#endif
72825
72826 tmo = req->expires - jiffies;
72827 if (tmo < 0)
72828diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
72829index 984ec65..97ac518 100644
72830--- a/net/ipv4/inet_hashtables.c
72831+++ b/net/ipv4/inet_hashtables.c
72832@@ -18,12 +18,15 @@
72833 #include <linux/sched.h>
72834 #include <linux/slab.h>
72835 #include <linux/wait.h>
72836+#include <linux/security.h>
72837
72838 #include <net/inet_connection_sock.h>
72839 #include <net/inet_hashtables.h>
72840 #include <net/secure_seq.h>
72841 #include <net/ip.h>
72842
72843+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72844+
72845 /*
72846 * Allocate and initialize a new local port bind bucket.
72847 * The bindhash mutex for snum's hash chain must be held here.
72848@@ -530,6 +533,8 @@ ok:
72849 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
72850 spin_unlock(&head->lock);
72851
72852+ gr_update_task_in_ip_table(current, inet_sk(sk));
72853+
72854 if (tw) {
72855 inet_twsk_deschedule(tw, death_row);
72856 while (twrefcnt) {
72857diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
72858index 86f13c67..59a35b5 100644
72859--- a/net/ipv4/inetpeer.c
72860+++ b/net/ipv4/inetpeer.c
72861@@ -436,8 +436,8 @@ relookup:
72862 if (p) {
72863 p->daddr = *daddr;
72864 atomic_set(&p->refcnt, 1);
72865- atomic_set(&p->rid, 0);
72866- atomic_set(&p->ip_id_count,
72867+ atomic_set_unchecked(&p->rid, 0);
72868+ atomic_set_unchecked(&p->ip_id_count,
72869 (daddr->family == AF_INET) ?
72870 secure_ip_id(daddr->addr.a4) :
72871 secure_ipv6_id(daddr->addr.a6));
72872diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
72873index fdaabf2..0ec3205 100644
72874--- a/net/ipv4/ip_fragment.c
72875+++ b/net/ipv4/ip_fragment.c
72876@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
72877 return 0;
72878
72879 start = qp->rid;
72880- end = atomic_inc_return(&peer->rid);
72881+ end = atomic_inc_return_unchecked(&peer->rid);
72882 qp->rid = end;
72883
72884 rc = qp->q.fragments && (end - start) > max;
72885diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
72886index 09ff51b..d3968eb 100644
72887--- a/net/ipv4/ip_sockglue.c
72888+++ b/net/ipv4/ip_sockglue.c
72889@@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72890 len = min_t(unsigned int, len, opt->optlen);
72891 if (put_user(len, optlen))
72892 return -EFAULT;
72893- if (copy_to_user(optval, opt->__data, len))
72894+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
72895+ copy_to_user(optval, opt->__data, len))
72896 return -EFAULT;
72897 return 0;
72898 }
72899@@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
72900 if (sk->sk_type != SOCK_STREAM)
72901 return -ENOPROTOOPT;
72902
72903- msg.msg_control = optval;
72904+ msg.msg_control = (void __force_kernel *)optval;
72905 msg.msg_controllen = len;
72906 msg.msg_flags = flags;
72907
72908diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
72909index 99ec116..c5628fe 100644
72910--- a/net/ipv4/ipconfig.c
72911+++ b/net/ipv4/ipconfig.c
72912@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
72913
72914 mm_segment_t oldfs = get_fs();
72915 set_fs(get_ds());
72916- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72917+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72918 set_fs(oldfs);
72919 return res;
72920 }
72921@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
72922
72923 mm_segment_t oldfs = get_fs();
72924 set_fs(get_ds());
72925- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
72926+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
72927 set_fs(oldfs);
72928 return res;
72929 }
72930@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
72931
72932 mm_segment_t oldfs = get_fs();
72933 set_fs(get_ds());
72934- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
72935+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
72936 set_fs(oldfs);
72937 return res;
72938 }
72939diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72940index 2133c30..5c4b40b 100644
72941--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
72942+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
72943@@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
72944
72945 *len = 0;
72946
72947- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72948+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72949 if (*octets == NULL)
72950 return 0;
72951
72952diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
72953index 43d4c3b..1914409 100644
72954--- a/net/ipv4/ping.c
72955+++ b/net/ipv4/ping.c
72956@@ -836,7 +836,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
72957 sk_rmem_alloc_get(sp),
72958 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72959 atomic_read(&sp->sk_refcnt), sp,
72960- atomic_read(&sp->sk_drops), len);
72961+ atomic_read_unchecked(&sp->sk_drops), len);
72962 }
72963
72964 static int ping_seq_show(struct seq_file *seq, void *v)
72965diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
72966index 007e2eb..85a18a0 100644
72967--- a/net/ipv4/raw.c
72968+++ b/net/ipv4/raw.c
72969@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
72970 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72971 {
72972 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72973- atomic_inc(&sk->sk_drops);
72974+ atomic_inc_unchecked(&sk->sk_drops);
72975 kfree_skb(skb);
72976 return NET_RX_DROP;
72977 }
72978@@ -738,16 +738,20 @@ static int raw_init(struct sock *sk)
72979
72980 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72981 {
72982+ struct icmp_filter filter;
72983+
72984 if (optlen > sizeof(struct icmp_filter))
72985 optlen = sizeof(struct icmp_filter);
72986- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72987+ if (copy_from_user(&filter, optval, optlen))
72988 return -EFAULT;
72989+ raw_sk(sk)->filter = filter;
72990 return 0;
72991 }
72992
72993 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72994 {
72995 int len, ret = -EFAULT;
72996+ struct icmp_filter filter;
72997
72998 if (get_user(len, optlen))
72999 goto out;
73000@@ -757,8 +761,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
73001 if (len > sizeof(struct icmp_filter))
73002 len = sizeof(struct icmp_filter);
73003 ret = -EFAULT;
73004- if (put_user(len, optlen) ||
73005- copy_to_user(optval, &raw_sk(sk)->filter, len))
73006+ filter = raw_sk(sk)->filter;
73007+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
73008 goto out;
73009 ret = 0;
73010 out: return ret;
73011@@ -986,7 +990,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73012 sk_wmem_alloc_get(sp),
73013 sk_rmem_alloc_get(sp),
73014 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73015- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73016+ atomic_read(&sp->sk_refcnt),
73017+#ifdef CONFIG_GRKERNSEC_HIDESYM
73018+ NULL,
73019+#else
73020+ sp,
73021+#endif
73022+ atomic_read_unchecked(&sp->sk_drops));
73023 }
73024
73025 static int raw_seq_show(struct seq_file *seq, void *v)
73026diff --git a/net/ipv4/route.c b/net/ipv4/route.c
73027index 94cdbc5..0cb0063 100644
73028--- a/net/ipv4/route.c
73029+++ b/net/ipv4/route.c
73030@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
73031
73032 static inline int rt_genid(struct net *net)
73033 {
73034- return atomic_read(&net->ipv4.rt_genid);
73035+ return atomic_read_unchecked(&net->ipv4.rt_genid);
73036 }
73037
73038 #ifdef CONFIG_PROC_FS
73039@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
73040 unsigned char shuffle;
73041
73042 get_random_bytes(&shuffle, sizeof(shuffle));
73043- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
73044+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
73045 redirect_genid++;
73046 }
73047
73048@@ -3022,7 +3022,7 @@ static int rt_fill_info(struct net *net,
73049 error = rt->dst.error;
73050 if (peer) {
73051 inet_peer_refcheck(rt->peer);
73052- id = atomic_read(&peer->ip_id_count) & 0xffff;
73053+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
73054 if (peer->tcp_ts_stamp) {
73055 ts = peer->tcp_ts;
73056 tsage = get_seconds() - peer->tcp_ts_stamp;
73057diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
73058index a9db4b1..3c03301 100644
73059--- a/net/ipv4/tcp_ipv4.c
73060+++ b/net/ipv4/tcp_ipv4.c
73061@@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
73062 int sysctl_tcp_low_latency __read_mostly;
73063 EXPORT_SYMBOL(sysctl_tcp_low_latency);
73064
73065+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73066+extern int grsec_enable_blackhole;
73067+#endif
73068
73069 #ifdef CONFIG_TCP_MD5SIG
73070 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
73071@@ -1627,6 +1630,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
73072 return 0;
73073
73074 reset:
73075+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73076+ if (!grsec_enable_blackhole)
73077+#endif
73078 tcp_v4_send_reset(rsk, skb);
73079 discard:
73080 kfree_skb(skb);
73081@@ -1689,12 +1695,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
73082 TCP_SKB_CB(skb)->sacked = 0;
73083
73084 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73085- if (!sk)
73086+ if (!sk) {
73087+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73088+ ret = 1;
73089+#endif
73090 goto no_tcp_socket;
73091-
73092+ }
73093 process:
73094- if (sk->sk_state == TCP_TIME_WAIT)
73095+ if (sk->sk_state == TCP_TIME_WAIT) {
73096+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73097+ ret = 2;
73098+#endif
73099 goto do_time_wait;
73100+ }
73101
73102 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
73103 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73104@@ -1744,6 +1757,10 @@ no_tcp_socket:
73105 bad_packet:
73106 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73107 } else {
73108+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73109+ if (!grsec_enable_blackhole || (ret == 1 &&
73110+ (skb->dev->flags & IFF_LOOPBACK)))
73111+#endif
73112 tcp_v4_send_reset(NULL, skb);
73113 }
73114
73115@@ -2404,7 +2421,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
73116 0, /* non standard timer */
73117 0, /* open_requests have no inode */
73118 atomic_read(&sk->sk_refcnt),
73119+#ifdef CONFIG_GRKERNSEC_HIDESYM
73120+ NULL,
73121+#else
73122 req,
73123+#endif
73124 len);
73125 }
73126
73127@@ -2454,7 +2475,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
73128 sock_i_uid(sk),
73129 icsk->icsk_probes_out,
73130 sock_i_ino(sk),
73131- atomic_read(&sk->sk_refcnt), sk,
73132+ atomic_read(&sk->sk_refcnt),
73133+#ifdef CONFIG_GRKERNSEC_HIDESYM
73134+ NULL,
73135+#else
73136+ sk,
73137+#endif
73138 jiffies_to_clock_t(icsk->icsk_rto),
73139 jiffies_to_clock_t(icsk->icsk_ack.ato),
73140 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
73141@@ -2482,7 +2508,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
73142 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
73143 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
73144 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73145- atomic_read(&tw->tw_refcnt), tw, len);
73146+ atomic_read(&tw->tw_refcnt),
73147+#ifdef CONFIG_GRKERNSEC_HIDESYM
73148+ NULL,
73149+#else
73150+ tw,
73151+#endif
73152+ len);
73153 }
73154
73155 #define TMPSZ 150
73156diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
73157index 66363b6..b0654a3 100644
73158--- a/net/ipv4/tcp_minisocks.c
73159+++ b/net/ipv4/tcp_minisocks.c
73160@@ -27,6 +27,10 @@
73161 #include <net/inet_common.h>
73162 #include <net/xfrm.h>
73163
73164+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73165+extern int grsec_enable_blackhole;
73166+#endif
73167+
73168 int sysctl_tcp_syncookies __read_mostly = 1;
73169 EXPORT_SYMBOL(sysctl_tcp_syncookies);
73170
73171@@ -751,6 +755,10 @@ listen_overflow:
73172
73173 embryonic_reset:
73174 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
73175+
73176+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73177+ if (!grsec_enable_blackhole)
73178+#endif
73179 if (!(flg & TCP_FLAG_RST))
73180 req->rsk_ops->send_reset(sk, skb);
73181
73182diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
73183index 85ee7eb..53277ab 100644
73184--- a/net/ipv4/tcp_probe.c
73185+++ b/net/ipv4/tcp_probe.c
73186@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
73187 if (cnt + width >= len)
73188 break;
73189
73190- if (copy_to_user(buf + cnt, tbuf, width))
73191+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
73192 return -EFAULT;
73193 cnt += width;
73194 }
73195diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
73196index 2e0f0af..e2948bf 100644
73197--- a/net/ipv4/tcp_timer.c
73198+++ b/net/ipv4/tcp_timer.c
73199@@ -22,6 +22,10 @@
73200 #include <linux/gfp.h>
73201 #include <net/tcp.h>
73202
73203+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73204+extern int grsec_lastack_retries;
73205+#endif
73206+
73207 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
73208 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
73209 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
73210@@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
73211 }
73212 }
73213
73214+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73215+ if ((sk->sk_state == TCP_LAST_ACK) &&
73216+ (grsec_lastack_retries > 0) &&
73217+ (grsec_lastack_retries < retry_until))
73218+ retry_until = grsec_lastack_retries;
73219+#endif
73220+
73221 if (retransmits_timed_out(sk, retry_until,
73222 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
73223 /* Has it gone just too far? */
73224diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
73225index 5a65eea..bd913a1 100644
73226--- a/net/ipv4/udp.c
73227+++ b/net/ipv4/udp.c
73228@@ -86,6 +86,7 @@
73229 #include <linux/types.h>
73230 #include <linux/fcntl.h>
73231 #include <linux/module.h>
73232+#include <linux/security.h>
73233 #include <linux/socket.h>
73234 #include <linux/sockios.h>
73235 #include <linux/igmp.h>
73236@@ -108,6 +109,10 @@
73237 #include <trace/events/udp.h>
73238 #include "udp_impl.h"
73239
73240+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73241+extern int grsec_enable_blackhole;
73242+#endif
73243+
73244 struct udp_table udp_table __read_mostly;
73245 EXPORT_SYMBOL(udp_table);
73246
73247@@ -565,6 +570,9 @@ found:
73248 return s;
73249 }
73250
73251+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
73252+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
73253+
73254 /*
73255 * This routine is called by the ICMP module when it gets some
73256 * sort of error condition. If err < 0 then the socket should
73257@@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
73258 dport = usin->sin_port;
73259 if (dport == 0)
73260 return -EINVAL;
73261+
73262+ err = gr_search_udp_sendmsg(sk, usin);
73263+ if (err)
73264+ return err;
73265 } else {
73266 if (sk->sk_state != TCP_ESTABLISHED)
73267 return -EDESTADDRREQ;
73268+
73269+ err = gr_search_udp_sendmsg(sk, NULL);
73270+ if (err)
73271+ return err;
73272+
73273 daddr = inet->inet_daddr;
73274 dport = inet->inet_dport;
73275 /* Open fast path for connected socket.
73276@@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
73277 udp_lib_checksum_complete(skb)) {
73278 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73279 IS_UDPLITE(sk));
73280- atomic_inc(&sk->sk_drops);
73281+ atomic_inc_unchecked(&sk->sk_drops);
73282 __skb_unlink(skb, rcvq);
73283 __skb_queue_tail(&list_kill, skb);
73284 }
73285@@ -1185,6 +1202,10 @@ try_again:
73286 if (!skb)
73287 goto out;
73288
73289+ err = gr_search_udp_recvmsg(sk, skb);
73290+ if (err)
73291+ goto out_free;
73292+
73293 ulen = skb->len - sizeof(struct udphdr);
73294 copied = len;
73295 if (copied > ulen)
73296@@ -1487,7 +1508,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
73297
73298 drop:
73299 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73300- atomic_inc(&sk->sk_drops);
73301+ atomic_inc_unchecked(&sk->sk_drops);
73302 kfree_skb(skb);
73303 return -1;
73304 }
73305@@ -1506,7 +1527,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73306 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
73307
73308 if (!skb1) {
73309- atomic_inc(&sk->sk_drops);
73310+ atomic_inc_unchecked(&sk->sk_drops);
73311 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
73312 IS_UDPLITE(sk));
73313 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
73314@@ -1675,6 +1696,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73315 goto csum_error;
73316
73317 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
73318+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73319+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73320+#endif
73321 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
73322
73323 /*
73324@@ -2098,8 +2122,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
73325 sk_wmem_alloc_get(sp),
73326 sk_rmem_alloc_get(sp),
73327 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
73328- atomic_read(&sp->sk_refcnt), sp,
73329- atomic_read(&sp->sk_drops), len);
73330+ atomic_read(&sp->sk_refcnt),
73331+#ifdef CONFIG_GRKERNSEC_HIDESYM
73332+ NULL,
73333+#else
73334+ sp,
73335+#endif
73336+ atomic_read_unchecked(&sp->sk_drops), len);
73337 }
73338
73339 int udp4_seq_show(struct seq_file *seq, void *v)
73340diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
73341index 36806de..b86f74c 100644
73342--- a/net/ipv6/addrconf.c
73343+++ b/net/ipv6/addrconf.c
73344@@ -2149,7 +2149,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
73345 p.iph.ihl = 5;
73346 p.iph.protocol = IPPROTO_IPV6;
73347 p.iph.ttl = 64;
73348- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
73349+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
73350
73351 if (ops->ndo_do_ioctl) {
73352 mm_segment_t oldfs = get_fs();
73353diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
73354index 1567fb1..29af910 100644
73355--- a/net/ipv6/inet6_connection_sock.c
73356+++ b/net/ipv6/inet6_connection_sock.c
73357@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
73358 #ifdef CONFIG_XFRM
73359 {
73360 struct rt6_info *rt = (struct rt6_info *)dst;
73361- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
73362+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
73363 }
73364 #endif
73365 }
73366@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
73367 #ifdef CONFIG_XFRM
73368 if (dst) {
73369 struct rt6_info *rt = (struct rt6_info *)dst;
73370- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
73371+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
73372 __sk_dst_reset(sk);
73373 dst = NULL;
73374 }
73375diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
73376index 26cb08c..8af9877 100644
73377--- a/net/ipv6/ipv6_sockglue.c
73378+++ b/net/ipv6/ipv6_sockglue.c
73379@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
73380 if (sk->sk_type != SOCK_STREAM)
73381 return -ENOPROTOOPT;
73382
73383- msg.msg_control = optval;
73384+ msg.msg_control = (void __force_kernel *)optval;
73385 msg.msg_controllen = len;
73386 msg.msg_flags = flags;
73387
73388diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
73389index 331af3b..7789844 100644
73390--- a/net/ipv6/raw.c
73391+++ b/net/ipv6/raw.c
73392@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
73393 {
73394 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
73395 skb_checksum_complete(skb)) {
73396- atomic_inc(&sk->sk_drops);
73397+ atomic_inc_unchecked(&sk->sk_drops);
73398 kfree_skb(skb);
73399 return NET_RX_DROP;
73400 }
73401@@ -404,7 +404,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73402 struct raw6_sock *rp = raw6_sk(sk);
73403
73404 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
73405- atomic_inc(&sk->sk_drops);
73406+ atomic_inc_unchecked(&sk->sk_drops);
73407 kfree_skb(skb);
73408 return NET_RX_DROP;
73409 }
73410@@ -428,7 +428,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
73411
73412 if (inet->hdrincl) {
73413 if (skb_checksum_complete(skb)) {
73414- atomic_inc(&sk->sk_drops);
73415+ atomic_inc_unchecked(&sk->sk_drops);
73416 kfree_skb(skb);
73417 return NET_RX_DROP;
73418 }
73419@@ -601,7 +601,7 @@ out:
73420 return err;
73421 }
73422
73423-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
73424+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
73425 struct flowi6 *fl6, struct dst_entry **dstp,
73426 unsigned int flags)
73427 {
73428@@ -909,12 +909,15 @@ do_confirm:
73429 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73430 char __user *optval, int optlen)
73431 {
73432+ struct icmp6_filter filter;
73433+
73434 switch (optname) {
73435 case ICMPV6_FILTER:
73436 if (optlen > sizeof(struct icmp6_filter))
73437 optlen = sizeof(struct icmp6_filter);
73438- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73439+ if (copy_from_user(&filter, optval, optlen))
73440 return -EFAULT;
73441+ raw6_sk(sk)->filter = filter;
73442 return 0;
73443 default:
73444 return -ENOPROTOOPT;
73445@@ -927,6 +930,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73446 char __user *optval, int __user *optlen)
73447 {
73448 int len;
73449+ struct icmp6_filter filter;
73450
73451 switch (optname) {
73452 case ICMPV6_FILTER:
73453@@ -938,7 +942,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73454 len = sizeof(struct icmp6_filter);
73455 if (put_user(len, optlen))
73456 return -EFAULT;
73457- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73458+ filter = raw6_sk(sk)->filter;
73459+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
73460 return -EFAULT;
73461 return 0;
73462 default:
73463@@ -1245,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
73464 0, 0L, 0,
73465 sock_i_uid(sp), 0,
73466 sock_i_ino(sp),
73467- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73468+ atomic_read(&sp->sk_refcnt),
73469+#ifdef CONFIG_GRKERNSEC_HIDESYM
73470+ NULL,
73471+#else
73472+ sp,
73473+#endif
73474+ atomic_read_unchecked(&sp->sk_drops));
73475 }
73476
73477 static int raw6_seq_show(struct seq_file *seq, void *v)
73478diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
73479index 2dea4bb..dca8ac5 100644
73480--- a/net/ipv6/tcp_ipv6.c
73481+++ b/net/ipv6/tcp_ipv6.c
73482@@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
73483 }
73484 #endif
73485
73486+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73487+extern int grsec_enable_blackhole;
73488+#endif
73489+
73490 static void tcp_v6_hash(struct sock *sk)
73491 {
73492 if (sk->sk_state != TCP_CLOSE) {
73493@@ -1651,6 +1655,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
73494 return 0;
73495
73496 reset:
73497+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73498+ if (!grsec_enable_blackhole)
73499+#endif
73500 tcp_v6_send_reset(sk, skb);
73501 discard:
73502 if (opt_skb)
73503@@ -1730,12 +1737,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
73504 TCP_SKB_CB(skb)->sacked = 0;
73505
73506 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73507- if (!sk)
73508+ if (!sk) {
73509+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73510+ ret = 1;
73511+#endif
73512 goto no_tcp_socket;
73513+ }
73514
73515 process:
73516- if (sk->sk_state == TCP_TIME_WAIT)
73517+ if (sk->sk_state == TCP_TIME_WAIT) {
73518+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73519+ ret = 2;
73520+#endif
73521 goto do_time_wait;
73522+ }
73523
73524 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
73525 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
73526@@ -1783,6 +1798,10 @@ no_tcp_socket:
73527 bad_packet:
73528 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73529 } else {
73530+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73531+ if (!grsec_enable_blackhole || (ret == 1 &&
73532+ (skb->dev->flags & IFF_LOOPBACK)))
73533+#endif
73534 tcp_v6_send_reset(NULL, skb);
73535 }
73536
73537@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
73538 uid,
73539 0, /* non standard timer */
73540 0, /* open_requests have no inode */
73541- 0, req);
73542+ 0,
73543+#ifdef CONFIG_GRKERNSEC_HIDESYM
73544+ NULL
73545+#else
73546+ req
73547+#endif
73548+ );
73549 }
73550
73551 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73552@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73553 sock_i_uid(sp),
73554 icsk->icsk_probes_out,
73555 sock_i_ino(sp),
73556- atomic_read(&sp->sk_refcnt), sp,
73557+ atomic_read(&sp->sk_refcnt),
73558+#ifdef CONFIG_GRKERNSEC_HIDESYM
73559+ NULL,
73560+#else
73561+ sp,
73562+#endif
73563 jiffies_to_clock_t(icsk->icsk_rto),
73564 jiffies_to_clock_t(icsk->icsk_ack.ato),
73565 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73566@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
73567 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73568 tw->tw_substate, 0, 0,
73569 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73570- atomic_read(&tw->tw_refcnt), tw);
73571+ atomic_read(&tw->tw_refcnt),
73572+#ifdef CONFIG_GRKERNSEC_HIDESYM
73573+ NULL
73574+#else
73575+ tw
73576+#endif
73577+ );
73578 }
73579
73580 static int tcp6_seq_show(struct seq_file *seq, void *v)
73581diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
73582index 8c25419..47a51ae 100644
73583--- a/net/ipv6/udp.c
73584+++ b/net/ipv6/udp.c
73585@@ -50,6 +50,10 @@
73586 #include <linux/seq_file.h>
73587 #include "udp_impl.h"
73588
73589+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73590+extern int grsec_enable_blackhole;
73591+#endif
73592+
73593 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73594 {
73595 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73596@@ -549,7 +553,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
73597
73598 return 0;
73599 drop:
73600- atomic_inc(&sk->sk_drops);
73601+ atomic_inc_unchecked(&sk->sk_drops);
73602 drop_no_sk_drops_inc:
73603 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
73604 kfree_skb(skb);
73605@@ -625,7 +629,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
73606 continue;
73607 }
73608 drop:
73609- atomic_inc(&sk->sk_drops);
73610+ atomic_inc_unchecked(&sk->sk_drops);
73611 UDP6_INC_STATS_BH(sock_net(sk),
73612 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
73613 UDP6_INC_STATS_BH(sock_net(sk),
73614@@ -780,6 +784,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73615 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73616 proto == IPPROTO_UDPLITE);
73617
73618+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73619+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73620+#endif
73621 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
73622
73623 kfree_skb(skb);
73624@@ -796,7 +803,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
73625 if (!sock_owned_by_user(sk))
73626 udpv6_queue_rcv_skb(sk, skb);
73627 else if (sk_add_backlog(sk, skb)) {
73628- atomic_inc(&sk->sk_drops);
73629+ atomic_inc_unchecked(&sk->sk_drops);
73630 bh_unlock_sock(sk);
73631 sock_put(sk);
73632 goto discard;
73633@@ -1407,8 +1414,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
73634 0, 0L, 0,
73635 sock_i_uid(sp), 0,
73636 sock_i_ino(sp),
73637- atomic_read(&sp->sk_refcnt), sp,
73638- atomic_read(&sp->sk_drops));
73639+ atomic_read(&sp->sk_refcnt),
73640+#ifdef CONFIG_GRKERNSEC_HIDESYM
73641+ NULL,
73642+#else
73643+ sp,
73644+#endif
73645+ atomic_read_unchecked(&sp->sk_drops));
73646 }
73647
73648 int udp6_seq_show(struct seq_file *seq, void *v)
73649diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
73650index 253695d..9481ce8 100644
73651--- a/net/irda/ircomm/ircomm_tty.c
73652+++ b/net/irda/ircomm/ircomm_tty.c
73653@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73654 add_wait_queue(&self->open_wait, &wait);
73655
73656 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73657- __FILE__,__LINE__, tty->driver->name, self->open_count );
73658+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73659
73660 /* As far as I can see, we protect open_count - Jean II */
73661 spin_lock_irqsave(&self->spinlock, flags);
73662 if (!tty_hung_up_p(filp)) {
73663 extra_count = 1;
73664- self->open_count--;
73665+ local_dec(&self->open_count);
73666 }
73667 spin_unlock_irqrestore(&self->spinlock, flags);
73668- self->blocked_open++;
73669+ local_inc(&self->blocked_open);
73670
73671 while (1) {
73672 if (tty->termios->c_cflag & CBAUD) {
73673@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73674 }
73675
73676 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73677- __FILE__,__LINE__, tty->driver->name, self->open_count );
73678+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73679
73680 schedule();
73681 }
73682@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
73683 if (extra_count) {
73684 /* ++ is not atomic, so this should be protected - Jean II */
73685 spin_lock_irqsave(&self->spinlock, flags);
73686- self->open_count++;
73687+ local_inc(&self->open_count);
73688 spin_unlock_irqrestore(&self->spinlock, flags);
73689 }
73690- self->blocked_open--;
73691+ local_dec(&self->blocked_open);
73692
73693 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73694- __FILE__,__LINE__, tty->driver->name, self->open_count);
73695+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73696
73697 if (!retval)
73698 self->flags |= ASYNC_NORMAL_ACTIVE;
73699@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
73700 }
73701 /* ++ is not atomic, so this should be protected - Jean II */
73702 spin_lock_irqsave(&self->spinlock, flags);
73703- self->open_count++;
73704+ local_inc(&self->open_count);
73705
73706 tty->driver_data = self;
73707 self->tty = tty;
73708 spin_unlock_irqrestore(&self->spinlock, flags);
73709
73710 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73711- self->line, self->open_count);
73712+ self->line, local_read(&self->open_count));
73713
73714 /* Not really used by us, but lets do it anyway */
73715 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73716@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73717 return;
73718 }
73719
73720- if ((tty->count == 1) && (self->open_count != 1)) {
73721+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73722 /*
73723 * Uh, oh. tty->count is 1, which means that the tty
73724 * structure will be freed. state->count should always
73725@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73726 */
73727 IRDA_DEBUG(0, "%s(), bad serial port count; "
73728 "tty->count is 1, state->count is %d\n", __func__ ,
73729- self->open_count);
73730- self->open_count = 1;
73731+ local_read(&self->open_count));
73732+ local_set(&self->open_count, 1);
73733 }
73734
73735- if (--self->open_count < 0) {
73736+ if (local_dec_return(&self->open_count) < 0) {
73737 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73738- __func__, self->line, self->open_count);
73739- self->open_count = 0;
73740+ __func__, self->line, local_read(&self->open_count));
73741+ local_set(&self->open_count, 0);
73742 }
73743- if (self->open_count) {
73744+ if (local_read(&self->open_count)) {
73745 spin_unlock_irqrestore(&self->spinlock, flags);
73746
73747 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73748@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
73749 tty->closing = 0;
73750 self->tty = NULL;
73751
73752- if (self->blocked_open) {
73753+ if (local_read(&self->blocked_open)) {
73754 if (self->close_delay)
73755 schedule_timeout_interruptible(self->close_delay);
73756 wake_up_interruptible(&self->open_wait);
73757@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
73758 spin_lock_irqsave(&self->spinlock, flags);
73759 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73760 self->tty = NULL;
73761- self->open_count = 0;
73762+ local_set(&self->open_count, 0);
73763 spin_unlock_irqrestore(&self->spinlock, flags);
73764
73765 wake_up_interruptible(&self->open_wait);
73766@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
73767 seq_putc(m, '\n');
73768
73769 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73770- seq_printf(m, "Open count: %d\n", self->open_count);
73771+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73772 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73773 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73774
73775diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
73776index 274d150..656a144 100644
73777--- a/net/iucv/af_iucv.c
73778+++ b/net/iucv/af_iucv.c
73779@@ -787,10 +787,10 @@ static int iucv_sock_autobind(struct sock *sk)
73780
73781 write_lock_bh(&iucv_sk_list.lock);
73782
73783- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73784+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73785 while (__iucv_get_sock_by_name(name)) {
73786 sprintf(name, "%08x",
73787- atomic_inc_return(&iucv_sk_list.autobind_name));
73788+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73789 }
73790
73791 write_unlock_bh(&iucv_sk_list.lock);
73792diff --git a/net/key/af_key.c b/net/key/af_key.c
73793index 1e733e9..3d73c9f 100644
73794--- a/net/key/af_key.c
73795+++ b/net/key/af_key.c
73796@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
73797 static u32 get_acqseq(void)
73798 {
73799 u32 res;
73800- static atomic_t acqseq;
73801+ static atomic_unchecked_t acqseq;
73802
73803 do {
73804- res = atomic_inc_return(&acqseq);
73805+ res = atomic_inc_return_unchecked(&acqseq);
73806 } while (!res);
73807 return res;
73808 }
73809diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
73810index 73495f1..ad51356 100644
73811--- a/net/mac80211/ieee80211_i.h
73812+++ b/net/mac80211/ieee80211_i.h
73813@@ -27,6 +27,7 @@
73814 #include <net/ieee80211_radiotap.h>
73815 #include <net/cfg80211.h>
73816 #include <net/mac80211.h>
73817+#include <asm/local.h>
73818 #include "key.h"
73819 #include "sta_info.h"
73820
73821@@ -764,7 +765,7 @@ struct ieee80211_local {
73822 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73823 spinlock_t queue_stop_reason_lock;
73824
73825- int open_count;
73826+ local_t open_count;
73827 int monitors, cooked_mntrs;
73828 /* number of interfaces with corresponding FIF_ flags */
73829 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
73830diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
73831index 30d7355..e260095 100644
73832--- a/net/mac80211/iface.c
73833+++ b/net/mac80211/iface.c
73834@@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73835 break;
73836 }
73837
73838- if (local->open_count == 0) {
73839+ if (local_read(&local->open_count) == 0) {
73840 res = drv_start(local);
73841 if (res)
73842 goto err_del_bss;
73843@@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73844 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
73845
73846 if (!is_valid_ether_addr(dev->dev_addr)) {
73847- if (!local->open_count)
73848+ if (!local_read(&local->open_count))
73849 drv_stop(local);
73850 return -EADDRNOTAVAIL;
73851 }
73852@@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73853 mutex_unlock(&local->mtx);
73854
73855 if (coming_up)
73856- local->open_count++;
73857+ local_inc(&local->open_count);
73858
73859 if (hw_reconf_flags) {
73860 ieee80211_hw_config(local, hw_reconf_flags);
73861@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
73862 err_del_interface:
73863 drv_remove_interface(local, &sdata->vif);
73864 err_stop:
73865- if (!local->open_count)
73866+ if (!local_read(&local->open_count))
73867 drv_stop(local);
73868 err_del_bss:
73869 sdata->bss = NULL;
73870@@ -472,7 +472,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73871 }
73872
73873 if (going_down)
73874- local->open_count--;
73875+ local_dec(&local->open_count);
73876
73877 switch (sdata->vif.type) {
73878 case NL80211_IFTYPE_AP_VLAN:
73879@@ -531,7 +531,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
73880
73881 ieee80211_recalc_ps(local, -1);
73882
73883- if (local->open_count == 0) {
73884+ if (local_read(&local->open_count) == 0) {
73885 if (local->ops->napi_poll)
73886 napi_disable(&local->napi);
73887 ieee80211_clear_tx_pending(local);
73888diff --git a/net/mac80211/main.c b/net/mac80211/main.c
73889index a7536fd..4039cc0 100644
73890--- a/net/mac80211/main.c
73891+++ b/net/mac80211/main.c
73892@@ -163,7 +163,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
73893 local->hw.conf.power_level = power;
73894 }
73895
73896- if (changed && local->open_count) {
73897+ if (changed && local_read(&local->open_count)) {
73898 ret = drv_config(local, changed);
73899 /*
73900 * Goal:
73901diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
73902index 9ee7164..56c5061 100644
73903--- a/net/mac80211/pm.c
73904+++ b/net/mac80211/pm.c
73905@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73906 struct ieee80211_sub_if_data *sdata;
73907 struct sta_info *sta;
73908
73909- if (!local->open_count)
73910+ if (!local_read(&local->open_count))
73911 goto suspend;
73912
73913 ieee80211_scan_cancel(local);
73914@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73915 cancel_work_sync(&local->dynamic_ps_enable_work);
73916 del_timer_sync(&local->dynamic_ps_timer);
73917
73918- local->wowlan = wowlan && local->open_count;
73919+ local->wowlan = wowlan && local_read(&local->open_count);
73920 if (local->wowlan) {
73921 int err = drv_suspend(local, wowlan);
73922 if (err < 0) {
73923@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
73924 }
73925
73926 /* stop hardware - this must stop RX */
73927- if (local->open_count)
73928+ if (local_read(&local->open_count))
73929 ieee80211_stop_device(local);
73930
73931 suspend:
73932diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
73933index 5a5a776..9600b11 100644
73934--- a/net/mac80211/rate.c
73935+++ b/net/mac80211/rate.c
73936@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
73937
73938 ASSERT_RTNL();
73939
73940- if (local->open_count)
73941+ if (local_read(&local->open_count))
73942 return -EBUSY;
73943
73944 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
73945diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
73946index c97a065..ff61928 100644
73947--- a/net/mac80211/rc80211_pid_debugfs.c
73948+++ b/net/mac80211/rc80211_pid_debugfs.c
73949@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
73950
73951 spin_unlock_irqrestore(&events->lock, status);
73952
73953- if (copy_to_user(buf, pb, p))
73954+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
73955 return -EFAULT;
73956
73957 return p;
73958diff --git a/net/mac80211/util.c b/net/mac80211/util.c
73959index d5230ec..c604b21 100644
73960--- a/net/mac80211/util.c
73961+++ b/net/mac80211/util.c
73962@@ -1000,7 +1000,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
73963 drv_set_coverage_class(local, hw->wiphy->coverage_class);
73964
73965 /* everything else happens only if HW was up & running */
73966- if (!local->open_count)
73967+ if (!local_read(&local->open_count))
73968 goto wake_up;
73969
73970 /*
73971diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
73972index d5597b7..ab6d39c 100644
73973--- a/net/netfilter/Kconfig
73974+++ b/net/netfilter/Kconfig
73975@@ -779,6 +779,16 @@ config NETFILTER_XT_MATCH_ESP
73976
73977 To compile it as a module, choose M here. If unsure, say N.
73978
73979+config NETFILTER_XT_MATCH_GRADM
73980+ tristate '"gradm" match support'
73981+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73982+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73983+ ---help---
73984+ The gradm match allows to match on grsecurity RBAC being enabled.
73985+ It is useful when iptables rules are applied early on bootup to
73986+ prevent connections to the machine (except from a trusted host)
73987+ while the RBAC system is disabled.
73988+
73989 config NETFILTER_XT_MATCH_HASHLIMIT
73990 tristate '"hashlimit" match support'
73991 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73992diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
73993index 1a02853..5d8c22e 100644
73994--- a/net/netfilter/Makefile
73995+++ b/net/netfilter/Makefile
73996@@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73997 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
73998 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73999 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
74000+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
74001 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
74002 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
74003 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
74004diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
74005index 29fa5ba..8debc79 100644
74006--- a/net/netfilter/ipvs/ip_vs_conn.c
74007+++ b/net/netfilter/ipvs/ip_vs_conn.c
74008@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
74009 /* Increase the refcnt counter of the dest */
74010 atomic_inc(&dest->refcnt);
74011
74012- conn_flags = atomic_read(&dest->conn_flags);
74013+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
74014 if (cp->protocol != IPPROTO_UDP)
74015 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
74016 /* Bind with the destination and its corresponding transmitter */
74017@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
74018 atomic_set(&cp->refcnt, 1);
74019
74020 atomic_set(&cp->n_control, 0);
74021- atomic_set(&cp->in_pkts, 0);
74022+ atomic_set_unchecked(&cp->in_pkts, 0);
74023
74024 atomic_inc(&ipvs->conn_count);
74025 if (flags & IP_VS_CONN_F_NO_CPORT)
74026@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
74027
74028 /* Don't drop the entry if its number of incoming packets is not
74029 located in [0, 8] */
74030- i = atomic_read(&cp->in_pkts);
74031+ i = atomic_read_unchecked(&cp->in_pkts);
74032 if (i > 8 || i < 0) return 0;
74033
74034 if (!todrop_rate[i]) return 0;
74035diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
74036index 093cc32..9209ae1 100644
74037--- a/net/netfilter/ipvs/ip_vs_core.c
74038+++ b/net/netfilter/ipvs/ip_vs_core.c
74039@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
74040 ret = cp->packet_xmit(skb, cp, pd->pp);
74041 /* do not touch skb anymore */
74042
74043- atomic_inc(&cp->in_pkts);
74044+ atomic_inc_unchecked(&cp->in_pkts);
74045 ip_vs_conn_put(cp);
74046 return ret;
74047 }
74048@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
74049 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
74050 pkts = sysctl_sync_threshold(ipvs);
74051 else
74052- pkts = atomic_add_return(1, &cp->in_pkts);
74053+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74054
74055 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
74056 cp->protocol == IPPROTO_SCTP) {
74057diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
74058index e1a66cf..0910076 100644
74059--- a/net/netfilter/ipvs/ip_vs_ctl.c
74060+++ b/net/netfilter/ipvs/ip_vs_ctl.c
74061@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
74062 ip_vs_rs_hash(ipvs, dest);
74063 write_unlock_bh(&ipvs->rs_lock);
74064 }
74065- atomic_set(&dest->conn_flags, conn_flags);
74066+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
74067
74068 /* bind the service */
74069 if (!dest->svc) {
74070@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74071 " %-7s %-6d %-10d %-10d\n",
74072 &dest->addr.in6,
74073 ntohs(dest->port),
74074- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74075+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74076 atomic_read(&dest->weight),
74077 atomic_read(&dest->activeconns),
74078 atomic_read(&dest->inactconns));
74079@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
74080 "%-7s %-6d %-10d %-10d\n",
74081 ntohl(dest->addr.ip),
74082 ntohs(dest->port),
74083- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
74084+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
74085 atomic_read(&dest->weight),
74086 atomic_read(&dest->activeconns),
74087 atomic_read(&dest->inactconns));
74088@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
74089
74090 entry.addr = dest->addr.ip;
74091 entry.port = dest->port;
74092- entry.conn_flags = atomic_read(&dest->conn_flags);
74093+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
74094 entry.weight = atomic_read(&dest->weight);
74095 entry.u_threshold = dest->u_threshold;
74096 entry.l_threshold = dest->l_threshold;
74097@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
74098 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
74099
74100 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
74101- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74102+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
74103 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
74104 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
74105 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
74106diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
74107index 2b6678c0..aaa41fc 100644
74108--- a/net/netfilter/ipvs/ip_vs_sync.c
74109+++ b/net/netfilter/ipvs/ip_vs_sync.c
74110@@ -649,7 +649,7 @@ control:
74111 * i.e only increment in_pkts for Templates.
74112 */
74113 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
74114- int pkts = atomic_add_return(1, &cp->in_pkts);
74115+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
74116
74117 if (pkts % sysctl_sync_period(ipvs) != 1)
74118 return;
74119@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
74120
74121 if (opt)
74122 memcpy(&cp->in_seq, opt, sizeof(*opt));
74123- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74124+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
74125 cp->state = state;
74126 cp->old_state = cp->state;
74127 /*
74128diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
74129index aa2d720..d8aa111 100644
74130--- a/net/netfilter/ipvs/ip_vs_xmit.c
74131+++ b/net/netfilter/ipvs/ip_vs_xmit.c
74132@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
74133 else
74134 rc = NF_ACCEPT;
74135 /* do not touch skb anymore */
74136- atomic_inc(&cp->in_pkts);
74137+ atomic_inc_unchecked(&cp->in_pkts);
74138 goto out;
74139 }
74140
74141@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
74142 else
74143 rc = NF_ACCEPT;
74144 /* do not touch skb anymore */
74145- atomic_inc(&cp->in_pkts);
74146+ atomic_inc_unchecked(&cp->in_pkts);
74147 goto out;
74148 }
74149
74150diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
74151index 66b2c54..c7884e3 100644
74152--- a/net/netfilter/nfnetlink_log.c
74153+++ b/net/netfilter/nfnetlink_log.c
74154@@ -70,7 +70,7 @@ struct nfulnl_instance {
74155 };
74156
74157 static DEFINE_SPINLOCK(instances_lock);
74158-static atomic_t global_seq;
74159+static atomic_unchecked_t global_seq;
74160
74161 #define INSTANCE_BUCKETS 16
74162 static struct hlist_head instance_table[INSTANCE_BUCKETS];
74163@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
74164 /* global sequence number */
74165 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
74166 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
74167- htonl(atomic_inc_return(&global_seq)));
74168+ htonl(atomic_inc_return_unchecked(&global_seq)));
74169
74170 if (data_len) {
74171 struct nlattr *nla;
74172diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
74173new file mode 100644
74174index 0000000..6905327
74175--- /dev/null
74176+++ b/net/netfilter/xt_gradm.c
74177@@ -0,0 +1,51 @@
74178+/*
74179+ * gradm match for netfilter
74180